Dataset columns:

    uuid                 string  (length 36 to 36)
    import_dependencies  list
    path                 string  (length 21 to 94)
    type                 string  (2 classes: "Function", "Class")
    link                 string  (length 98 to 248)
    source_code          string  (length 38 to 36.6k)
    doc_content          string  (length 0 to 27.2k)
    description          string  (length 0 to 885)
c8c9ef20-5f38-4459-be5b-b5bffc2f5a5a
[ "__future__.annotations", "warnings", "typing.Any", "typing.Iterable", "typing.List", "typing.Optional", "sqlalchemy", "sqlalchemy.MetaData", "sqlalchemy.Table", "sqlalchemy.create_engine", "sqlalchemy.inspect", "sqlalchemy.select", "sqlalchemy.text", "sqlalchemy.engine.Engine", "sqlalchemy.exc.ProgrammingError", "sqlalchemy.exc.SQLAlchemyError", "sqlalchemy.schema.CreateTable", "langchain.utils" ]
langchain.sql_database.truncate_word
Function
https://api.python.langchain.com/en/latest/sql_database/langchain.sql_database.truncate_word.html#langchain.sql_database.truncate_word
def truncate_word(content: Any, *, length: int, suffix: str = "...") -> str:
    """
    Truncate a string to a certain number of words, based on the max string
    length.
    """
    if not isinstance(content, str) or length <= 0:
        return content
    if len(content) <= length:
        return content
    # Cut to the length budget (reserving room for the suffix), then drop the
    # trailing partial word so the result ends on a word boundary.
    return content[: length - len(suffix)].rsplit(" ", 1)[0] + suffix
langchain.sql_database.truncate_word

langchain.sql_database.truncate_word(content: Any, *, length: int, suffix: str = '...') → str

Truncate a string to a certain number of words, based on the max string length.
Truncate a string to a certain number of words, based on the max string length.
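For reference, a quick usage sketch of the function above; the sample strings and length budgets are illustrative only:

```python
from langchain.sql_database import truncate_word

# Strings at or under the length budget pass through unchanged.
print(truncate_word("select * from users", length=50))
# -> 'select * from users'

# Longer strings are cut at a word boundary, then the suffix is appended.
print(truncate_word("select id, name, email from users", length=20))
# -> 'select id, name,...'

# Non-string content and non-positive lengths are returned as-is.
print(truncate_word(12345, length=10))
# -> 12345
```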
ad508fd3-740d-44e0-affd-03e42d23f689
[ "typing.List", "langchain.chains.llm.LLMChain", "langchain.prompts.few_shot.FewShotPromptTemplate", "langchain.prompts.prompt.PromptTemplate", "langchain.schema.language_model.BaseLanguageModel" ]
langchain.example_generator.generate_example
Function
https://api.python.langchain.com/en/latest/example_generator/langchain.example_generator.generate_example.html#langchain.example_generator.generate_example
def generate_example(
    examples: List[dict], llm: BaseLanguageModel, prompt_template: PromptTemplate
) -> str:
    """Return another example given a list of examples for a prompt."""
    prompt = FewShotPromptTemplate(
        examples=examples,
        suffix=TEST_GEN_TEMPLATE_SUFFIX,
        input_variables=[],
        example_prompt=prompt_template,
    )
    chain = LLMChain(llm=llm, prompt=prompt)
    return chain.predict()
langchain.example_generator.generate_example

langchain.example_generator.generate_example(examples: List[dict], llm: BaseLanguageModel, prompt_template: PromptTemplate) → str

Return another example given a list of examples for a prompt.
Return another example given a list of examples for a prompt.
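A hedged usage sketch for generate_example. The example data and the OpenAI wrapper are illustrative assumptions (any BaseLanguageModel plus a PromptTemplate whose input variables match the example dicts will do), and running it requires a configured OpenAI API key:

```python
from langchain.example_generator import generate_example
from langchain.llms import OpenAI
from langchain.prompts.prompt import PromptTemplate

# Each dict's keys must match the example_prompt's input variables.
examples = [
    {"question": "2 + 2", "answer": "4"},
    {"question": "3 * 5", "answer": "15"},
]
example_prompt = PromptTemplate(
    input_variables=["question", "answer"],
    template="Q: {question}\nA: {answer}",
)

llm = OpenAI(temperature=0.7)  # any BaseLanguageModel works here
new_example = generate_example(examples, llm, example_prompt)
print(new_example)
```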
3fb7541e-5761-4111-a0c2-d0a6b6f69461
[ "__future__.annotations", "copy", "logging", "re", "abc.ABC", "abc.abstractmethod", "dataclasses.dataclass", "enum.Enum", "typing.AbstractSet", "typing.Any", "typing.Callable", "typing.Collection", "typing.Dict", "typing.Iterable", "typing.List", "typing.Literal", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.TypedDict", "typing.TypeVar", "typing.Union", "typing.cast", "langchain.docstore.document.Document", "langchain.schema.BaseDocumentTransformer", "spacy", "spacy.lang.en.English" ]
langchain.text_splitter.TextSplitter
Class
https://api.python.langchain.com/en/latest/text_splitter/langchain.text_splitter.TextSplitter.html#langchain.text_splitter.TextSplitter
class TextSplitter(BaseDocumentTransformer, ABC):
    """Interface for splitting text into chunks."""

    def __init__(
        self,
        chunk_size: int = 4000,
        chunk_overlap: int = 200,
        length_function: Callable[[str], int] = len,
        keep_separator: bool = False,
        add_start_index: bool = False,
    ) -> None:
        """Create a new TextSplitter.

        Args:
            chunk_size: Maximum size of chunks to return
            chunk_overlap: Overlap in characters between chunks
            length_function: Function that measures the length of given chunks
            keep_separator: Whether to keep the separator in the chunks
            add_start_index: If `True`, includes chunk's start index in metadata
        """
        if chunk_overlap > chunk_size:
            raise ValueError(
                f"Got a larger chunk overlap ({chunk_overlap}) than chunk size "
                f"({chunk_size}), should be smaller."
            )
        self._chunk_size = chunk_size
        self._chunk_overlap = chunk_overlap
        self._length_function = length_function
        self._keep_separator = keep_separator
        self._add_start_index = add_start_index

    @abstractmethod
    def split_text(self, text: str) -> List[str]:
        """Split text into multiple components."""

    def create_documents(
        self, texts: List[str], metadatas: Optional[List[dict]] = None
    ) -> List[Document]:
        """Create documents from a list of texts."""
        _metadatas = metadatas or [{}] * len(texts)
        documents = []
        for i, text in enumerate(texts):
            index = -1
            for chunk in self.split_text(text):
                metadata = copy.deepcopy(_metadatas[i])
                if self._add_start_index:
                    index = text.find(chunk, index + 1)
                    metadata["start_index"] = index
                new_doc = Document(page_content=chunk, metadata=metadata)
                documents.append(new_doc)
        return documents

    def split_documents(self, documents: Iterable[Document]) -> List[Document]:
        """Split documents."""
        texts, metadatas = [], []
        for doc in documents:
            texts.append(doc.page_content)
            metadatas.append(doc.metadata)
        return self.create_documents(texts, metadatas=metadatas)

    def _join_docs(self, docs: List[str], separator: str) -> Optional[str]:
        text = separator.join(docs)
        text = text.strip()
        if text == "":
            return None
        else:
            return text

    def _merge_splits(self, splits: Iterable[str], separator: str) -> List[str]:
        # We now want to combine these smaller pieces into medium size
        # chunks to send to the LLM.
        separator_len = self._length_function(separator)

        docs = []
        current_doc: List[str] = []
        total = 0
        for d in splits:
            _len = self._length_function(d)
            if (
                total + _len + (separator_len if len(current_doc) > 0 else 0)
                > self._chunk_size
            ):
                if total > self._chunk_size:
                    logger.warning(
                        f"Created a chunk of size {total}, "
                        f"which is longer than the specified {self._chunk_size}"
                    )
                if len(current_doc) > 0:
                    doc = self._join_docs(current_doc, separator)
                    if doc is not None:
                        docs.append(doc)
                    # Keep on popping if:
                    # - we have a larger chunk than in the chunk overlap
                    # - or if we still have any chunks and the length is long
                    while total > self._chunk_overlap or (
                        total + _len + (separator_len if len(current_doc) > 0 else 0)
                        > self._chunk_size
                        and total > 0
                    ):
                        total -= self._length_function(current_doc[0]) + (
                            separator_len if len(current_doc) > 1 else 0
                        )
                        current_doc = current_doc[1:]
            current_doc.append(d)
            total += _len + (separator_len if len(current_doc) > 1 else 0)
        doc = self._join_docs(current_doc, separator)
        if doc is not None:
            docs.append(doc)
        return docs

    @classmethod
    def from_huggingface_tokenizer(cls, tokenizer: Any, **kwargs: Any) -> TextSplitter:
        """Text splitter that uses HuggingFace tokenizer to count length."""
        try:
            from transformers import PreTrainedTokenizerBase

            if not isinstance(tokenizer, PreTrainedTokenizerBase):
                raise ValueError(
                    "Tokenizer received was not an instance of PreTrainedTokenizerBase"
                )

            def _huggingface_tokenizer_length(text: str) -> int:
                return len(tokenizer.encode(text))

        except ImportError:
            raise ValueError(
                "Could not import transformers python package. "
                "Please install it with `pip install transformers`."
            )
        return cls(length_function=_huggingface_tokenizer_length, **kwargs)

    @classmethod
    def from_tiktoken_encoder(
        cls: Type[TS],
        encoding_name: str = "gpt2",
        model_name: Optional[str] = None,
        allowed_special: Union[Literal["all"], AbstractSet[str]] = set(),
        disallowed_special: Union[Literal["all"], Collection[str]] = "all",
        **kwargs: Any,
    ) -> TS:
        """Text splitter that uses tiktoken encoder to count length."""
        try:
            import tiktoken
        except ImportError:
            raise ImportError(
                "Could not import tiktoken python package. "
                "This is needed in order to calculate max_tokens_for_prompt. "
                "Please install it with `pip install tiktoken`."
            )

        if model_name is not None:
            enc = tiktoken.encoding_for_model(model_name)
        else:
            enc = tiktoken.get_encoding(encoding_name)

        def _tiktoken_encoder(text: str) -> int:
            return len(
                enc.encode(
                    text,
                    allowed_special=allowed_special,
                    disallowed_special=disallowed_special,
                )
            )

        if issubclass(cls, TokenTextSplitter):
            extra_kwargs = {
                "encoding_name": encoding_name,
                "model_name": model_name,
                "allowed_special": allowed_special,
                "disallowed_special": disallowed_special,
            }
            kwargs = {**kwargs, **extra_kwargs}

        return cls(length_function=_tiktoken_encoder, **kwargs)

    def transform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        """Transform sequence of documents by splitting them."""
        return self.split_documents(list(documents))

    async def atransform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        """Asynchronously transform a sequence of documents by splitting them."""
        raise NotImplementedError
langchain.text_splitter.TextSplitter

class langchain.text_splitter.TextSplitter(chunk_size: int = 4000, chunk_overlap: int = 200, length_function: Callable[[str], int] = len, keep_separator: bool = False, add_start_index: bool = False)

Bases: BaseDocumentTransformer, ABC

Interface for splitting text into chunks. Create a new TextSplitter.

Parameters
    chunk_size – Maximum size of chunks to return
    chunk_overlap – Overlap in characters between chunks
    length_function – Function that measures the length of given chunks
    keep_separator – Whether to keep the separator in the chunks
    add_start_index – If True, includes chunk's start index in metadata

Methods
    async atransform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]
        Asynchronously transform a sequence of documents by splitting them.
    create_documents(texts: List[str], metadatas: Optional[List[dict]] = None) → List[Document]
        Create documents from a list of texts.
    classmethod from_huggingface_tokenizer(tokenizer: Any, **kwargs: Any) → TextSplitter
        Text splitter that uses HuggingFace tokenizer to count length.
    classmethod from_tiktoken_encoder(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any) → TS
        Text splitter that uses tiktoken encoder to count length.
    split_documents(documents: Iterable[Document]) → List[Document]
        Split documents.
    abstract split_text(text: str) → List[str]
        Split text into multiple components.
    transform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]
        Transform sequence of documents by splitting them.
Interface for splitting text into chunks.
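Since TextSplitter is abstract, using it directly means implementing split_text. A minimal sketch of a custom subclass, relying only on the hooks visible in the source above (_merge_splits and create_documents); the paragraph-splitting logic is an illustrative assumption, not a LangChain class:

```python
from typing import List

from langchain.text_splitter import TextSplitter


class ParagraphSplitter(TextSplitter):
    """Toy splitter: one split per blank-line-separated paragraph."""

    def split_text(self, text: str) -> List[str]:
        # Naively split on blank lines, then let the inherited
        # _merge_splits logic pack paragraphs into chunk_size chunks.
        paragraphs = [p for p in text.split("\n\n") if p.strip()]
        return self._merge_splits(paragraphs, "\n\n")


splitter = ParagraphSplitter(chunk_size=100, chunk_overlap=0)
docs = splitter.create_documents(["first paragraph\n\nsecond paragraph"])
print([d.page_content for d in docs])
```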
0d2a66b6-c6a5-427d-9860-83534e84af67
[ "__future__.annotations", "copy", "logging", "re", "abc.ABC", "abc.abstractmethod", "dataclasses.dataclass", "enum.Enum", "typing.AbstractSet", "typing.Any", "typing.Callable", "typing.Collection", "typing.Dict", "typing.Iterable", "typing.List", "typing.Literal", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.TypedDict", "typing.TypeVar", "typing.Union", "typing.cast", "langchain.docstore.document.Document", "langchain.schema.BaseDocumentTransformer", "spacy", "spacy.lang.en.English", "transformers.PreTrainedTokenizerBase", "tiktoken" ]
langchain.text_splitter.CharacterTextSplitter
Class
https://api.python.langchain.com/en/latest/text_splitter/langchain.text_splitter.CharacterTextSplitter.html#langchain.text_splitter.CharacterTextSplitter
class CharacterTextSplitter(TextSplitter):
    """Implementation of splitting text that looks at characters."""

    def __init__(self, separator: str = "\n\n", **kwargs: Any) -> None:
        """Create a new TextSplitter."""
        super().__init__(**kwargs)
        self._separator = separator

    def split_text(self, text: str) -> List[str]:
        """Split incoming text and return chunks."""
        # First we naively split the large input into a bunch of smaller ones.
        splits = _split_text_with_regex(text, self._separator, self._keep_separator)
        _separator = "" if self._keep_separator else self._separator
        return self._merge_splits(splits, _separator)
langchain.text_splitter.CharacterTextSplitter

class langchain.text_splitter.CharacterTextSplitter(separator: str = '\n\n', **kwargs: Any)

Bases: TextSplitter

Implementation of splitting text that looks at characters. Create a new TextSplitter.

Methods
    async atransform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]
        Asynchronously transform a sequence of documents by splitting them.
    create_documents(texts: List[str], metadatas: Optional[List[dict]] = None) → List[Document]
        Create documents from a list of texts.
    classmethod from_huggingface_tokenizer(tokenizer: Any, **kwargs: Any) → TextSplitter
        Text splitter that uses HuggingFace tokenizer to count length.
    classmethod from_tiktoken_encoder(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any) → TS
        Text splitter that uses tiktoken encoder to count length.
    split_documents(documents: Iterable[Document]) → List[Document]
        Split documents.
    split_text(text: str) → List[str]
        Split incoming text and return chunks.
    transform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]
        Transform sequence of documents by splitting them.
Implementation of splitting text that looks at characters.
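A short usage sketch for CharacterTextSplitter; the sample text and sizes are illustrative. The separator is applied first, and the resulting pieces are then re-packed into chunks of at most chunk_size characters:

```python
from langchain.text_splitter import CharacterTextSplitter

splitter = CharacterTextSplitter(separator="\n\n", chunk_size=40, chunk_overlap=0)
text = "First paragraph.\n\nSecond paragraph.\n\nThird paragraph."
print(splitter.split_text(text))
# -> ['First paragraph.\n\nSecond paragraph.', 'Third paragraph.']
```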
ae01af4a-6665-47a0-96bd-3dcd643fdb2c
[ "__future__.annotations", "copy", "logging", "re", "abc.ABC", "abc.abstractmethod", "dataclasses.dataclass", "enum.Enum", "typing.AbstractSet", "typing.Any", "typing.Callable", "typing.Collection", "typing.Dict", "typing.Iterable", "typing.List", "typing.Literal", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.TypedDict", "typing.TypeVar", "typing.Union", "typing.cast", "langchain.docstore.document.Document", "langchain.schema.BaseDocumentTransformer", "spacy", "spacy.lang.en.English", "transformers.PreTrainedTokenizerBase", "tiktoken" ]
langchain.text_splitter.LineType
Class
https://api.python.langchain.com/en/latest/text_splitter/langchain.text_splitter.LineType.html#langchain.text_splitter.LineType
class LineType(TypedDict):
    """Line type as typed dict."""

    metadata: Dict[str, str]
    content: str
langchain.text_splitter.LineType

class langchain.text_splitter.LineType

Bases: TypedDict

Line type as typed dict.

Attributes
    content: str
    metadata: Dict[str, str]

As a TypedDict, LineType also carries the standard dict methods (clear, copy, fromkeys, get, items, keys, pop, popitem, setdefault, update, values), inherited unchanged from dict.
Line type as typed dict.
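At runtime a TypedDict is a plain dict, so constructing a LineType is just building a dict with the annotated keys; the values below are illustrative:

```python
from langchain.text_splitter import LineType

# The annotations only drive static type checking; this is an ordinary dict.
line: LineType = {
    "metadata": {"Header 1": "Introduction"},
    "content": "This is the first line under the heading.",
}
print(line["content"])
```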
693dd8d0-ac31-4469-866c-e0908de5da20
[ "__future__.annotations", "copy", "logging", "re", "abc.ABC", "abc.abstractmethod", "dataclasses.dataclass", "enum.Enum", "typing.AbstractSet", "typing.Any", "typing.Callable", "typing.Collection", "typing.Dict", "typing.Iterable", "typing.List", "typing.Literal", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.TypedDict", "typing.TypeVar", "typing.Union", "typing.cast", "langchain.docstore.document.Document", "langchain.schema.BaseDocumentTransformer", "spacy", "spacy.lang.en.English", "transformers.PreTrainedTokenizerBase", "tiktoken" ]
langchain.text_splitter.HeaderType
Class
https://api.python.langchain.com/en/latest/text_splitter/langchain.text_splitter.HeaderType.html#langchain.text_splitter.HeaderType
class HeaderType(TypedDict):
    """Header type as typed dict."""

    level: int
    name: str
    data: str
langchain.text_splitter.HeaderType

class langchain.text_splitter.HeaderType

Bases: TypedDict

Header type as typed dict.

Attributes
    level: int
    name: str
    data: str

As a TypedDict, HeaderType also carries the standard dict methods (clear, copy, fromkeys, get, items, keys, pop, popitem, setdefault, update, values), inherited unchanged from dict.
Header type as typed dict.
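Likewise for HeaderType; the header values below are illustrative:

```python
from langchain.text_splitter import HeaderType

# Tracks one document header: its level, display name, and text.
header: HeaderType = {"level": 2, "name": "Header 2", "data": "Background"}
print(header["data"])
```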
76b20b75-eab9-4aa4-b9eb-b9bb0b026342
[ "__future__.annotations", "copy", "logging", "re", "abc.ABC", "abc.abstractmethod", "dataclasses.dataclass", "enum.Enum", "typing.AbstractSet", "typing.Any", "typing.Callable", "typing.Collection", "typing.Dict", "typing.Iterable", "typing.List", "typing.Literal", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.TypedDict", "typing.TypeVar", "typing.Union", "typing.cast", "langchain.docstore.document.Document", "langchain.schema.BaseDocumentTransformer", "spacy", "spacy.lang.en.English", "transformers.PreTrainedTokenizerBase", "tiktoken" ]
langchain.text_splitter.split_text_on_tokens
Function
https://api.python.langchain.com/en/latest/text_splitter/langchain.text_splitter.split_text_on_tokens.html#langchain.text_splitter.split_text_on_tokens
def split_text_on_tokens(*, text: str, tokenizer: Tokenizer) -> List[str]:
    """Split incoming text and return chunks."""
    splits: List[str] = []
    input_ids = tokenizer.encode(text)
    start_idx = 0
    cur_idx = min(start_idx + tokenizer.tokens_per_chunk, len(input_ids))
    chunk_ids = input_ids[start_idx:cur_idx]
    while start_idx < len(input_ids):
        splits.append(tokenizer.decode(chunk_ids))
        # Advance by tokens_per_chunk minus the overlap so consecutive
        # chunks share chunk_overlap tokens.
        start_idx += tokenizer.tokens_per_chunk - tokenizer.chunk_overlap
        cur_idx = min(start_idx + tokenizer.tokens_per_chunk, len(input_ids))
        chunk_ids = input_ids[start_idx:cur_idx]
    return splits
langchain.text_splitter.split_text_on_tokens

langchain.text_splitter.split_text_on_tokens(*, text: str, tokenizer: Tokenizer) → List[str]

Split incoming text and return chunks.
Split incoming text and return chunks.
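A sketch of split_text_on_tokens with a stand-in word-level "tokenizer". Tokenizer here is assumed to be the module-level helper dataclass whose fields (chunk_overlap, tokens_per_chunk, decode, encode) are visible in the TokenTextSplitter source below; the whitespace encode/decode pair is an illustrative stand-in, not a real tokenizer. Note how the windows overlap, and that a short trailing window can repeat material:

```python
from langchain.text_splitter import Tokenizer, split_text_on_tokens

# Stand-in "tokenizer": each whitespace-separated word is one token id.
words = Tokenizer(
    chunk_overlap=2,
    tokens_per_chunk=5,
    decode=lambda ids: " ".join(ids),
    encode=lambda text: text.split(),
)

text = "one two three four five six seven eight nine ten"
print(split_text_on_tokens(text=text, tokenizer=words))
# -> ['one two three four five',
#     'four five six seven eight',
#     'seven eight nine ten',
#     'ten']
```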
991b21d6-7d0d-4624-877f-092984db4651
[ "__future__.annotations", "copy", "logging", "re", "abc.ABC", "abc.abstractmethod", "dataclasses.dataclass", "enum.Enum", "typing.AbstractSet", "typing.Any", "typing.Callable", "typing.Collection", "typing.Dict", "typing.Iterable", "typing.List", "typing.Literal", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.TypedDict", "typing.TypeVar", "typing.Union", "typing.cast", "langchain.docstore.document.Document", "langchain.schema.BaseDocumentTransformer", "spacy", "spacy.lang.en.English", "transformers.PreTrainedTokenizerBase", "tiktoken" ]
langchain.text_splitter.TokenTextSplitter
Class
https://api.python.langchain.com/en/latest/text_splitter/langchain.text_splitter.TokenTextSplitter.html#langchain.text_splitter.TokenTextSplitter
class TokenTextSplitter(TextSplitter):
    """Implementation of splitting text that looks at tokens."""

    def __init__(
        self,
        encoding_name: str = "gpt2",
        model_name: Optional[str] = None,
        allowed_special: Union[Literal["all"], AbstractSet[str]] = set(),
        disallowed_special: Union[Literal["all"], Collection[str]] = "all",
        **kwargs: Any,
    ) -> None:
        """Create a new TextSplitter."""
        super().__init__(**kwargs)
        try:
            import tiktoken
        except ImportError:
            raise ImportError(
                "Could not import tiktoken python package. "
                "This is needed in order to use TokenTextSplitter. "
                "Please install it with `pip install tiktoken`."
            )

        if model_name is not None:
            enc = tiktoken.encoding_for_model(model_name)
        else:
            enc = tiktoken.get_encoding(encoding_name)
        self._tokenizer = enc
        self._allowed_special = allowed_special
        self._disallowed_special = disallowed_special

    def split_text(self, text: str) -> List[str]:
        def _encode(_text: str) -> List[int]:
            return self._tokenizer.encode(
                _text,
                allowed_special=self._allowed_special,
                disallowed_special=self._disallowed_special,
            )

        tokenizer = Tokenizer(
            chunk_overlap=self._chunk_overlap,
            tokens_per_chunk=self._chunk_size,
            decode=self._tokenizer.decode,
            encode=_encode,
        )

        return split_text_on_tokens(text=text, tokenizer=tokenizer)
langchain.text_splitter.TokenTextSplitter

class langchain.text_splitter.TokenTextSplitter(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any)

Bases: TextSplitter

Implementation of splitting text that looks at tokens. Create a new TextSplitter.

Methods
    async atransform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]
        Asynchronously transform a sequence of documents by splitting them.
    create_documents(texts: List[str], metadatas: Optional[List[dict]] = None) → List[Document]
        Create documents from a list of texts.
    classmethod from_huggingface_tokenizer(tokenizer: Any, **kwargs: Any) → TextSplitter
        Text splitter that uses HuggingFace tokenizer to count length.
    classmethod from_tiktoken_encoder(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any) → TS
        Text splitter that uses tiktoken encoder to count length.
    split_documents(documents: Iterable[Document]) → List[Document]
        Split documents.
    split_text(text: str) → List[str]
        Split text into multiple components.
    transform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]
        Transform sequence of documents by splitting them.
Implementation of splitting text that looks at tokens.
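A usage sketch for TokenTextSplitter, assuming tiktoken is installed; here chunk_size and chunk_overlap count tokens, not characters:

```python
from langchain.text_splitter import TokenTextSplitter

splitter = TokenTextSplitter(encoding_name="gpt2", chunk_size=10, chunk_overlap=2)
chunks = splitter.split_text(
    "LangChain splits long documents into pieces sized for a model's context window."
)
for chunk in chunks:
    print(repr(chunk))
```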
eec99d6f-e6bd-4fd8-bc82-d70a1329f119
[ "__future__.annotations", "copy", "logging", "re", "abc.ABC", "abc.abstractmethod", "dataclasses.dataclass", "enum.Enum", "typing.AbstractSet", "typing.Any", "typing.Callable", "typing.Collection", "typing.Dict", "typing.Iterable", "typing.List", "typing.Literal", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.TypedDict", "typing.TypeVar", "typing.Union", "typing.cast", "langchain.docstore.document.Document", "langchain.schema.BaseDocumentTransformer", "spacy", "spacy.lang.en.English", "transformers.PreTrainedTokenizerBase", "tiktoken", "tiktoken" ]
langchain.text_splitter.SentenceTransformersTokenTextSplitter
Class
https://api.python.langchain.com/en/latest/text_splitter/langchain.text_splitter.SentenceTransformersTokenTextSplitter.html#langchain.text_splitter.SentenceTransformersTokenTextSplitter
class SentenceTransformersTokenTextSplitter(TextSplitter):
    """Implementation of splitting text that looks at tokens."""

    def __init__(
        self,
        chunk_overlap: int = 50,
        model_name: str = "sentence-transformers/all-mpnet-base-v2",
        tokens_per_chunk: Optional[int] = None,
        **kwargs: Any,
    ) -> None:
        """Create a new TextSplitter."""
        super().__init__(**kwargs, chunk_overlap=chunk_overlap)

        try:
            from sentence_transformers import SentenceTransformer
        except ImportError:
            raise ImportError(
                "Could not import sentence_transformers python package. "
                "This is needed in order to use "
                "SentenceTransformersTokenTextSplitter. "
                "Please install it with `pip install sentence-transformers`."
            )

        self.model_name = model_name
        self._model = SentenceTransformer(self.model_name)
        self.tokenizer = self._model.tokenizer
        self._initialize_chunk_configuration(tokens_per_chunk=tokens_per_chunk)

    def _initialize_chunk_configuration(
        self, *, tokens_per_chunk: Optional[int]
    ) -> None:
        self.maximum_tokens_per_chunk = cast(int, self._model.max_seq_length)

        if tokens_per_chunk is None:
            self.tokens_per_chunk = self.maximum_tokens_per_chunk
        else:
            self.tokens_per_chunk = tokens_per_chunk

        if self.tokens_per_chunk > self.maximum_tokens_per_chunk:
            raise ValueError(
                f"The token limit of the model '{self.model_name}'"
                f" is: {self.maximum_tokens_per_chunk}."
                f" Argument tokens_per_chunk={self.tokens_per_chunk}"
                f" > maximum token limit."
            )

    def split_text(self, text: str) -> List[str]:
        def encode_strip_start_and_stop_token_ids(text: str) -> List[int]:
            # Drop the special start/end tokens the tokenizer adds.
            return self._encode(text)[1:-1]

        tokenizer = Tokenizer(
            chunk_overlap=self._chunk_overlap,
            tokens_per_chunk=self.tokens_per_chunk,
            decode=self.tokenizer.decode,
            encode=encode_strip_start_and_stop_token_ids,
        )

        return split_text_on_tokens(text=text, tokenizer=tokenizer)

    def count_tokens(self, *, text: str) -> int:
        return len(self._encode(text))

    _max_length_equal_32_bit_integer = 2**32

    def _encode(self, text: str) -> List[int]:
        token_ids_with_start_and_end_token_ids = self.tokenizer.encode(
            text,
            max_length=self._max_length_equal_32_bit_integer,
            truncation="do_not_truncate",
        )
        return token_ids_with_start_and_end_token_ids
langchain.text_splitter.SentenceTransformersTokenTextSplitter

class langchain.text_splitter.SentenceTransformersTokenTextSplitter(chunk_overlap: int = 50, model_name: str = 'sentence-transformers/all-mpnet-base-v2', tokens_per_chunk: Optional[int] = None, **kwargs: Any)

Bases: TextSplitter

Implementation of splitting text that looks at tokens. Create a new TextSplitter.

Methods
    async atransform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]
        Asynchronously transform a sequence of documents by splitting them.
    count_tokens(*, text: str) → int
    create_documents(texts: List[str], metadatas: Optional[List[dict]] = None) → List[Document]
        Create documents from a list of texts.
    classmethod from_huggingface_tokenizer(tokenizer: Any, **kwargs: Any) → TextSplitter
        Text splitter that uses HuggingFace tokenizer to count length.
    classmethod from_tiktoken_encoder(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any) → TS
        Text splitter that uses tiktoken encoder to count length.
    split_documents(documents: Iterable[Document]) → List[Document]
        Split documents.
    split_text(text: str) → List[str]
        Split text into multiple components.
    transform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]
        Transform sequence of documents by splitting them.
Implementation of splitting text that looks at tokens.
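A sketch for SentenceTransformersTokenTextSplitter; it needs sentence-transformers installed, downloads the named model on first use, and tokens_per_chunk must stay at or below the model's max_seq_length. The text is illustrative:

```python
from langchain.text_splitter import SentenceTransformersTokenTextSplitter

splitter = SentenceTransformersTokenTextSplitter(
    model_name="sentence-transformers/all-mpnet-base-v2",
    tokens_per_chunk=128,
    chunk_overlap=16,
)

text = "Sentence transformers cap sequence length, so long inputs must be split. " * 40
print(splitter.count_tokens(text=text))  # token count, incl. special tokens
print(len(splitter.split_text(text)))    # number of chunks produced
```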
8135559b-a4d4-43a5-ab8c-9f4b3c8578a4
[ "__future__.annotations", "copy", "logging", "re", "abc.ABC", "abc.abstractmethod", "dataclasses.dataclass", "enum.Enum", "typing.AbstractSet", "typing.Any", "typing.Callable", "typing.Collection", "typing.Dict", "typing.Iterable", "typing.List", "typing.Literal", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.TypedDict", "typing.TypeVar", "typing.Union", "typing.cast", "langchain.docstore.document.Document", "langchain.schema.BaseDocumentTransformer", "spacy", "spacy.lang.en.English", "transformers.PreTrainedTokenizerBase", "tiktoken", "tiktoken", "sentence_transformers.SentenceTransformer" ]
langchain.text_splitter.Language
Class
https://api.python.langchain.com/en/latest/text_splitter/langchain.text_splitter.Language.html#langchain.text_splitter.Language
class Language(str, Enum):
    """Enum of the programming languages."""

    CPP = "cpp"
    GO = "go"
    JAVA = "java"
    JS = "js"
    PHP = "php"
    PROTO = "proto"
    PYTHON = "python"
    RST = "rst"
    RUBY = "ruby"
    RUST = "rust"
    SCALA = "scala"
    SWIFT = "swift"
    MARKDOWN = "markdown"
    LATEX = "latex"
    HTML = "html"
    SOL = "sol"
langchain.text_splitter.Language

class langchain.text_splitter.Language(value)

Bases: str, Enum

Enum of the programming languages.

Members
    CPP = 'cpp'
    GO = 'go'
    HTML = 'html'
    JAVA = 'java'
    JS = 'js'
    LATEX = 'latex'
    MARKDOWN = 'markdown'
    PHP = 'php'
    PROTO = 'proto'
    PYTHON = 'python'
    RST = 'rst'
    RUBY = 'ruby'
    RUST = 'rust'
    SCALA = 'scala'
    SOL = 'sol'
    SWIFT = 'swift'

Because Language subclasses str, every member also carries the full set of built-in str methods (capitalize, casefold, split, startswith, and so on), inherited unchanged.
Enum of the programming languages.
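The enum's main consumer is RecursiveCharacterTextSplitter.from_language (next record); a sketch with illustrative sizes:

```python
from langchain.text_splitter import Language, RecursiveCharacterTextSplitter

# Language-aware separators: Python code is split at class/def boundaries
# before falling back to blank lines, newlines, spaces, and characters.
splitter = RecursiveCharacterTextSplitter.from_language(
    language=Language.PYTHON, chunk_size=60, chunk_overlap=0
)
code = 'def hello():\n    print("hello")\n\ndef goodbye():\n    print("goodbye")\n'
for chunk in splitter.split_text(code):
    print(repr(chunk))
```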
968e4c9d-9237-41ad-bcf7-1090dc3aee04
[ "__future__.annotations", "copy", "logging", "re", "abc.ABC", "abc.abstractmethod", "dataclasses.dataclass", "enum.Enum", "typing.AbstractSet", "typing.Any", "typing.Callable", "typing.Collection", "typing.Dict", "typing.Iterable", "typing.List", "typing.Literal", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.TypedDict", "typing.TypeVar", "typing.Union", "typing.cast", "langchain.docstore.document.Document", "langchain.schema.BaseDocumentTransformer", "spacy", "spacy.lang.en.English", "transformers.PreTrainedTokenizerBase", "tiktoken", "tiktoken", "sentence_transformers.SentenceTransformer" ]
langchain.text_splitter.RecursiveCharacterTextSplitter
Class
https://api.python.langchain.com/en/latest/text_splitter/langchain.text_splitter.RecursiveCharacterTextSplitter.html#langchain.text_splitter.RecursiveCharacterTextSplitter
class RecursiveCharacterTextSplitter(TextSplitter):
    """Implementation of splitting text that looks at characters.

    Recursively tries to split by different characters to find one
    that works.
    """

    def __init__(
        self,
        separators: Optional[List[str]] = None,
        keep_separator: bool = True,
        **kwargs: Any,
    ) -> None:
        """Create a new TextSplitter."""
        super().__init__(keep_separator=keep_separator, **kwargs)
        self._separators = separators or ["\n\n", "\n", " ", ""]

    def _split_text(self, text: str, separators: List[str]) -> List[str]:
        """Split incoming text and return chunks."""
        final_chunks = []
        # Get appropriate separator to use
        separator = separators[-1]
        new_separators = []
        for i, _s in enumerate(separators):
            if _s == "":
                separator = _s
                break
            if re.search(_s, text):
                separator = _s
                new_separators = separators[i + 1 :]
                break

        splits = _split_text_with_regex(text, separator, self._keep_separator)
        # Now go merging things, recursively splitting longer texts.
        _good_splits = []
        _separator = "" if self._keep_separator else separator
        for s in splits:
            if self._length_function(s) < self._chunk_size:
                _good_splits.append(s)
            else:
                if _good_splits:
                    merged_text = self._merge_splits(_good_splits, _separator)
                    final_chunks.extend(merged_text)
                    _good_splits = []
                if not new_separators:
                    final_chunks.append(s)
                else:
                    other_info = self._split_text(s, new_separators)
                    final_chunks.extend(other_info)
        if _good_splits:
            merged_text = self._merge_splits(_good_splits, _separator)
            final_chunks.extend(merged_text)
        return final_chunks

    def split_text(self, text: str) -> List[str]:
        return self._split_text(text, self._separators)

    @classmethod
    def from_language(
        cls, language: Language, **kwargs: Any
    ) -> RecursiveCharacterTextSplitter:
        separators = cls.get_separators_for_language(language)
        return cls(separators=separators, **kwargs)

    @staticmethod
    def get_separators_for_language(language: Language) -> List[str]:
        if language == Language.CPP:
            return [
                # Split along class definitions
                "\nclass ",
                # Split along function definitions
                "\nvoid ",
                "\nint ",
                "\nfloat ",
                "\ndouble ",
                # Split along control flow statements
                "\nif ",
                "\nfor ",
                "\nwhile ",
                "\nswitch ",
                "\ncase ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.GO:
            return [
                # Split along function definitions
                "\nfunc ",
                "\nvar ",
                "\nconst ",
                "\ntype ",
                # Split along control flow statements
                "\nif ",
                "\nfor ",
                "\nswitch ",
                "\ncase ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.JAVA:
            return [
                # Split along class definitions
                "\nclass ",
                # Split along method definitions
                "\npublic ",
                "\nprotected ",
                "\nprivate ",
                "\nstatic ",
                # Split along control flow statements
                "\nif ",
                "\nfor ",
                "\nwhile ",
                "\nswitch ",
                "\ncase ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.JS:
            return [
                # Split along function definitions
                "\nfunction ",
                "\nconst ",
                "\nlet ",
                "\nvar ",
                "\nclass ",
                # Split along control flow statements
                "\nif ",
                "\nfor ",
                "\nwhile ",
                "\nswitch ",
                "\ncase ",
                "\ndefault ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.PHP:
            return [
                # Split along function definitions
                "\nfunction ",
                # Split along class definitions
                "\nclass ",
                # Split along control flow statements
                "\nif ",
                "\nforeach ",
                "\nwhile ",
                "\ndo ",
                "\nswitch ",
                "\ncase ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.PROTO:
            return [
                # Split along message definitions
                "\nmessage ",
                # Split along service definitions
                "\nservice ",
                # Split along enum definitions
                "\nenum ",
                # Split along option definitions
                "\noption ",
                # Split along import statements
                "\nimport ",
                # Split along syntax declarations
                "\nsyntax ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.PYTHON:
            return [
                # First, try to split along class definitions
                "\nclass ",
                "\ndef ",
                "\n\tdef ",
                # Now split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.RST:
            return [
                # Split along section titles
                "\n=+\n",
                "\n-+\n",
                "\n\*+\n",
                # Split along directive markers
                "\n\n.. *\n\n",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.RUBY:
            return [
                # Split along method definitions
                "\ndef ",
                "\nclass ",
                # Split along control flow statements
                "\nif ",
                "\nunless ",
                "\nwhile ",
                "\nfor ",
                "\ndo ",
                "\nbegin ",
                "\nrescue ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.RUST:
            return [
                # Split along function definitions
                "\nfn ",
                "\nconst ",
                "\nlet ",
                # Split along control flow statements
                "\nif ",
                "\nwhile ",
                "\nfor ",
                "\nloop ",
                "\nmatch ",
                "\nconst ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.SCALA:
            return [
                # Split along class definitions
                "\nclass ",
                "\nobject ",
                # Split along method definitions
                "\ndef ",
                "\nval ",
                "\nvar ",
                # Split along control flow statements
                "\nif ",
                "\nfor ",
                "\nwhile ",
                "\nmatch ",
                "\ncase ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.SWIFT:
            return [
                # Split along function definitions
                "\nfunc ",
                # Split along class definitions
                "\nclass ",
                "\nstruct ",
                "\nenum ",
                # Split along control flow statements
                "\nif ",
                "\nfor ",
                "\nwhile ",
                "\ndo ",
                "\nswitch ",
                "\ncase ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.MARKDOWN:
            return [
                # First, try to split along Markdown headings (starting with level 2)
                "\n#{1,6} ",
                # Note the alternative syntax for headings (below) is not handled here
                # Heading level 2
                # ---------------
                # End of code block
                "```\n",
                # Horizontal lines
                "\n\*\*\*+\n",
                "\n---+\n",
                "\n___+\n",
                # Note that this splitter doesn't handle horizontal lines defined
                # by *three or more* of ***, ---, or ___, but this is not handled
                "\n\n",
                "\n",
                " ",
                "",
            ]
        elif language == Language.LATEX:
            return [
                # First, try to split along Latex sections
                "\n\\\chapter{",
                "\n\\\section{",
                "\n\\\subsection{",
                "\n\\\subsubsection{",
                # Now split by environments
                "\n\\\begin{enumerate}",
                "\n\\\begin{itemize}",
                "\n\\\begin{description}",
                "\n\\\begin{list}",
                "\n\\\begin{quote}",
                "\n\\\begin{quotation}",
                "\n\\\begin{verse}",
                "\n\\\begin{verbatim}",
                # Now split by math environments
                "\n\\\begin{align}",
                "$$",
                "$",
                # Now split by the normal type of lines
                " ",
                "",
            ]
        elif language == Language.HTML:
            return [
                # First, try to split along HTML tags
                "<body",
                "<div",
                "<p",
                "<br",
                "<li",
                "<h1",
                "<h2",
                "<h3",
                "<h4",
                "<h5",
                "<h6",
                "<span",
                "<table",
                "<tr",
                "<td",
                "<th",
                "<ul",
                "<ol",
                "<header",
                "<footer",
                "<nav",
                # Head
                "<head",
                "<style",
                "<script",
                "<meta",
                "<title",
                "",
            ]
        elif language == Language.SOL:
            return [
                # Split along compiler information definitions
                "\npragma ",
                "\nusing ",
                # Split along contract definitions
                "\ncontract ",
                "\ninterface ",
                "\nlibrary ",
                # Split along method definitions
                "\nconstructor ",
                "\ntype ",
                "\nfunction ",
                "\nevent ",
                "\nmodifier ",
                "\nerror ",
                "\nstruct ",
                "\nenum ",
                # Split along control flow statements
                "\nif ",
                "\nfor ",
                "\nwhile ",
                "\ndo while ",
                "\nassembly ",
                # Split by the normal type of lines
                "\n\n",
                "\n",
                " ",
                "",
            ]
        else:
            raise ValueError(
                f"Language {language} is not supported! "
                f"Please choose from {list(Language)}"
            )
langchain.text_splitter.RecursiveCharacterTextSplitter

class langchain.text_splitter.RecursiveCharacterTextSplitter(separators: Optional[List[str]] = None, keep_separator: bool = True, **kwargs: Any)

Bases: TextSplitter

Implementation of splitting text that looks at characters. Recursively tries to split by different characters to find one that works. Create a new TextSplitter.

Methods
    async atransform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]
        Asynchronously transform a sequence of documents by splitting them.
    create_documents(texts: List[str], metadatas: Optional[List[dict]] = None) → List[Document]
        Create documents from a list of texts.
    classmethod from_huggingface_tokenizer(tokenizer: Any, **kwargs: Any) → TextSplitter
        Text splitter that uses HuggingFace tokenizer to count length.
    classmethod from_language(language: Language, **kwargs: Any) → RecursiveCharacterTextSplitter
    classmethod from_tiktoken_encoder(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any) → TS
        Text splitter that uses tiktoken encoder to count length.
    static get_separators_for_language(language: Language) → List[str]
    split_documents(documents: Iterable[Document]) → List[Document]
        Split documents.
    split_text(text: str) → List[str]
        Split text into multiple components.
    transform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]
        Transform sequence of documents by splitting them.
Implementation of splitting text that looks at characters.
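For orientation, a minimal usage sketch; the chunk sizes and sample text are illustrative choices, not library defaults:

from langchain.text_splitter import Language, RecursiveCharacterTextSplitter

# Generic recursive splitting: tries "\n\n", then "\n", then " ", then "".
splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)
chunks = splitter.split_text("First paragraph.\n\nSecond, somewhat longer paragraph.")

# Language-aware splitting reuses get_separators_for_language under the hood.
py_splitter = RecursiveCharacterTextSplitter.from_language(
    Language.PYTHON, chunk_size=100, chunk_overlap=0
)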
b212c8ac-18f4-4d6f-8ae7-2cd68e34ce7f
[ "__future__.annotations", "copy", "logging", "re", "abc.ABC", "abc.abstractmethod", "dataclasses.dataclass", "enum.Enum", "typing.AbstractSet", "typing.Any", "typing.Callable", "typing.Collection", "typing.Dict", "typing.Iterable", "typing.List", "typing.Literal", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.TypedDict", "typing.TypeVar", "typing.Union", "typing.cast", "langchain.docstore.document.Document", "langchain.schema.BaseDocumentTransformer", "spacy", "spacy.lang.en.English", "transformers.PreTrainedTokenizerBase", "tiktoken", "tiktoken", "sentence_transformers.SentenceTransformer" ]
langchain.text_splitter.NLTKTextSplitter
Class
https://api.python.langchain.com/en/latest/text_splitter/langchain.text_splitter.NLTKTextSplitter.html#langchain.text_splitter.NLTKTextSplitter
class NLTKTextSplitter(TextSplitter): """Implementation of splitting text that looks at sentences using NLTK.""" def __init__(self, separator: str = "\n\n", **kwargs: Any) -> None: """Initialize the NLTK splitter.""" super().__init__(**kwargs) try: from nltk.tokenize import sent_tokenize self._tokenizer = sent_tokenize except ImportError: raise ImportError( "NLTK is not installed, please install it with `pip install nltk`." ) self._separator = separator def split_text(self, text: str) -> List[str]: """Split incoming text and return chunks.""" # First we naively split the large input into a bunch of smaller ones. splits = self._tokenizer(text) return self._merge_splits(splits, self._separator)
langchain.text_splitter.NLTKTextSplitter¶ class langchain.text_splitter.NLTKTextSplitter(separator: str = '\n\n', **kwargs: Any)[source]¶ Bases: TextSplitter Implementation of splitting text that looks at sentences using NLTK. Initialize the NLTK splitter. Methods __init__([separator]) Initialize the NLTK splitter. atransform_documents(documents, **kwargs) Asynchronously transform a sequence of documents by splitting them. create_documents(texts[, metadatas]) Create documents from a list of texts. from_huggingface_tokenizer(tokenizer, **kwargs) Text splitter that uses HuggingFace tokenizer to count length. from_tiktoken_encoder([encoding_name, ...]) Text splitter that uses tiktoken encoder to count length. split_documents(documents) Split documents. split_text(text) Split incoming text and return chunks. transform_documents(documents, **kwargs) Transform sequence of documents by splitting them. async atransform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶ Asynchronously transform a sequence of documents by splitting them. create_documents(texts: List[str], metadatas: Optional[List[dict]] = None) → List[Document]¶ Create documents from a list of texts. classmethod from_huggingface_tokenizer(tokenizer: Any, **kwargs: Any) → TextSplitter¶ Text splitter that uses HuggingFace tokenizer to count length. classmethod from_tiktoken_encoder(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any) → TS¶ Text splitter that uses tiktoken encoder to count length. split_documents(documents: Iterable[Document]) → List[Document]¶ Split documents. split_text(text: str) → List[str][source]¶ Split incoming text and return chunks. transform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶ Transform sequence of documents by splitting them.
Implementation of splitting text that looks at sentences using NLTK.
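A minimal usage sketch; beyond `pip install nltk`, the sentence tokenizer assumes the NLTK "punkt" data has been downloaded:

import nltk
from langchain.text_splitter import NLTKTextSplitter

nltk.download("punkt")  # one-time download of the sentence tokenizer data
splitter = NLTKTextSplitter(chunk_size=200, chunk_overlap=0)
chunks = splitter.split_text("This is one sentence. Here is another. And a third.")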
c1649f3b-83bf-47c7-b02d-dfa0ddd27a10
[ "__future__.annotations", "copy", "logging", "re", "abc.ABC", "abc.abstractmethod", "dataclasses.dataclass", "enum.Enum", "typing.AbstractSet", "typing.Any", "typing.Callable", "typing.Collection", "typing.Dict", "typing.Iterable", "typing.List", "typing.Literal", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.TypedDict", "typing.TypeVar", "typing.Union", "typing.cast", "langchain.docstore.document.Document", "langchain.schema.BaseDocumentTransformer", "spacy", "spacy.lang.en.English", "transformers.PreTrainedTokenizerBase", "tiktoken", "tiktoken", "sentence_transformers.SentenceTransformer", "nltk.tokenize.sent_tokenize" ]
langchain.text_splitter.SpacyTextSplitter
Class
https://api.python.langchain.com/en/latest/text_splitter/langchain.text_splitter.SpacyTextSplitter.html#langchain.text_splitter.SpacyTextSplitter
class SpacyTextSplitter(TextSplitter):
    """Implementation of splitting text that looks at sentences using Spacy.

    By default, Spacy's `en_core_web_sm` model is used. For a faster, but
    potentially less accurate splitting, you can use `pipeline='sentencizer'`.
    """

    def __init__(
        self, separator: str = "\n\n", pipeline: str = "en_core_web_sm", **kwargs: Any
    ) -> None:
        """Initialize the spacy text splitter."""
        super().__init__(**kwargs)
        self._tokenizer = _make_spacy_pipeline_for_splitting(pipeline)
        self._separator = separator

    def split_text(self, text: str) -> List[str]:
        """Split incoming text and return chunks."""
        splits = (s.text for s in self._tokenizer(text).sents)
        return self._merge_splits(splits, self._separator)
langchain.text_splitter.SpacyTextSplitter¶ class langchain.text_splitter.SpacyTextSplitter(separator: str = '\n\n', pipeline: str = 'en_core_web_sm', **kwargs: Any)[source]¶ Bases: TextSplitter Implementation of splitting text that looks at sentences using Spacy. By default, Spacy's en_core_web_sm model is used. For a faster, but potentially less accurate splitting, you can use pipeline='sentencizer'. Initialize the spacy text splitter. Methods __init__([separator, pipeline]) Initialize the spacy text splitter. atransform_documents(documents, **kwargs) Asynchronously transform a sequence of documents by splitting them. create_documents(texts[, metadatas]) Create documents from a list of texts. from_huggingface_tokenizer(tokenizer, **kwargs) Text splitter that uses HuggingFace tokenizer to count length. from_tiktoken_encoder([encoding_name, ...]) Text splitter that uses tiktoken encoder to count length. split_documents(documents) Split documents. split_text(text) Split incoming text and return chunks. transform_documents(documents, **kwargs) Transform sequence of documents by splitting them. async atransform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶ Asynchronously transform a sequence of documents by splitting them. create_documents(texts: List[str], metadatas: Optional[List[dict]] = None) → List[Document]¶ Create documents from a list of texts. classmethod from_huggingface_tokenizer(tokenizer: Any, **kwargs: Any) → TextSplitter¶ Text splitter that uses HuggingFace tokenizer to count length. classmethod from_tiktoken_encoder(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any) → TS¶ Text splitter that uses tiktoken encoder to count length. split_documents(documents: Iterable[Document]) → List[Document]¶ Split documents. split_text(text: str) → List[str][source]¶ Split incoming text and return chunks. transform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶ Transform sequence of documents by splitting them.
Implementation of splitting text that looks at sentences using Spacy.
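A minimal usage sketch; the default pipeline assumes the en_core_web_sm model is installed (python -m spacy download en_core_web_sm):

from langchain.text_splitter import SpacyTextSplitter

splitter = SpacyTextSplitter(chunk_size=200)
chunks = splitter.split_text("First sentence here. Second sentence here.")

# Faster, rule-based alternative that needs no trained model:
fast_splitter = SpacyTextSplitter(pipeline="sentencizer")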
25a1de84-9630-4287-b4cb-b67d46a25a8f
[ "__future__.annotations", "copy", "logging", "re", "abc.ABC", "abc.abstractmethod", "dataclasses.dataclass", "enum.Enum", "typing.AbstractSet", "typing.Any", "typing.Callable", "typing.Collection", "typing.Dict", "typing.Iterable", "typing.List", "typing.Literal", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.TypedDict", "typing.TypeVar", "typing.Union", "typing.cast", "langchain.docstore.document.Document", "langchain.schema.BaseDocumentTransformer", "spacy", "spacy.lang.en.English", "transformers.PreTrainedTokenizerBase", "tiktoken", "tiktoken", "sentence_transformers.SentenceTransformer", "nltk.tokenize.sent_tokenize" ]
langchain.text_splitter.PythonCodeTextSplitter
Class
https://api.python.langchain.com/en/latest/text_splitter/langchain.text_splitter.PythonCodeTextSplitter.html#langchain.text_splitter.PythonCodeTextSplitter
class PythonCodeTextSplitter(RecursiveCharacterTextSplitter): """Attempts to split the text along Python syntax.""" def __init__(self, **kwargs: Any) -> None: """Initialize a PythonCodeTextSplitter.""" separators = self.get_separators_for_language(Language.PYTHON) super().__init__(separators=separators, **kwargs)
langchain.text_splitter.PythonCodeTextSplitter¶ class langchain.text_splitter.PythonCodeTextSplitter(**kwargs: Any)[source]¶ Bases: RecursiveCharacterTextSplitter Attempts to split the text along Python syntax. Initialize a PythonCodeTextSplitter. Methods __init__(**kwargs) Initialize a PythonCodeTextSplitter. atransform_documents(documents, **kwargs) Asynchronously transform a sequence of documents by splitting them. create_documents(texts[, metadatas]) Create documents from a list of texts. from_huggingface_tokenizer(tokenizer, **kwargs) Text splitter that uses HuggingFace tokenizer to count length. from_language(language, **kwargs) from_tiktoken_encoder([encoding_name, ...]) Text splitter that uses tiktoken encoder to count length. get_separators_for_language(language) split_documents(documents) Split documents. split_text(text) Split text into multiple components. transform_documents(documents, **kwargs) Transform sequence of documents by splitting them. async atransform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶ Asynchronously transform a sequence of documents by splitting them. create_documents(texts: List[str], metadatas: Optional[List[dict]] = None) → List[Document]¶ Create documents from a list of texts. classmethod from_huggingface_tokenizer(tokenizer: Any, **kwargs: Any) → TextSplitter¶ Text splitter that uses HuggingFace tokenizer to count length. classmethod from_language(language: Language, **kwargs: Any) → RecursiveCharacterTextSplitter¶ classmethod from_tiktoken_encoder(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any) → TS¶ Text splitter that uses tiktoken encoder to count length. static get_separators_for_language(language: Language) → List[str]¶ split_documents(documents: Iterable[Document]) → List[Document]¶ Split documents. split_text(text: str) → List[str]¶ Split text into multiple components. transform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶ Transform sequence of documents by splitting them.
Attempts to split the text along Python syntax.
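A minimal usage sketch with made-up source code:

from langchain.text_splitter import PythonCodeTextSplitter

code = """
class Greeter:
    def greet(self):
        return "hello"

def main():
    print(Greeter().greet())
"""
splitter = PythonCodeTextSplitter(chunk_size=60, chunk_overlap=0)
chunks = splitter.split_text(code)  # splits preferentially at class/def boundaries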
68217e4a-9929-4c8e-9893-d2be3c76b322
[ "__future__.annotations", "copy", "logging", "re", "abc.ABC", "abc.abstractmethod", "dataclasses.dataclass", "enum.Enum", "typing.AbstractSet", "typing.Any", "typing.Callable", "typing.Collection", "typing.Dict", "typing.Iterable", "typing.List", "typing.Literal", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.TypedDict", "typing.TypeVar", "typing.Union", "typing.cast", "langchain.docstore.document.Document", "langchain.schema.BaseDocumentTransformer", "spacy", "spacy.lang.en.English", "transformers.PreTrainedTokenizerBase", "tiktoken", "tiktoken", "sentence_transformers.SentenceTransformer", "nltk.tokenize.sent_tokenize" ]
langchain.text_splitter.MarkdownTextSplitter
Class
https://api.python.langchain.com/en/latest/text_splitter/langchain.text_splitter.MarkdownTextSplitter.html#langchain.text_splitter.MarkdownTextSplitter
class MarkdownTextSplitter(RecursiveCharacterTextSplitter): """Attempts to split the text along Markdown-formatted headings.""" def __init__(self, **kwargs: Any) -> None: """Initialize a MarkdownTextSplitter.""" separators = self.get_separators_for_language(Language.MARKDOWN) super().__init__(separators=separators, **kwargs)
langchain.text_splitter.MarkdownTextSplitter¶ class langchain.text_splitter.MarkdownTextSplitter(**kwargs: Any)[source]¶ Bases: RecursiveCharacterTextSplitter Attempts to split the text along Markdown-formatted headings. Initialize a MarkdownTextSplitter. Methods __init__(**kwargs) Initialize a MarkdownTextSplitter. atransform_documents(documents, **kwargs) Asynchronously transform a sequence of documents by splitting them. create_documents(texts[, metadatas]) Create documents from a list of texts. from_huggingface_tokenizer(tokenizer, **kwargs) Text splitter that uses HuggingFace tokenizer to count length. from_language(language, **kwargs) from_tiktoken_encoder([encoding_name, ...]) Text splitter that uses tiktoken encoder to count length. get_separators_for_language(language) split_documents(documents) Split documents. split_text(text) Split text into multiple components. transform_documents(documents, **kwargs) Transform sequence of documents by splitting them. async atransform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶ Asynchronously transform a sequence of documents by splitting them. create_documents(texts: List[str], metadatas: Optional[List[dict]] = None) → List[Document]¶ Create documents from a list of texts. classmethod from_huggingface_tokenizer(tokenizer: Any, **kwargs: Any) → TextSplitter¶ Text splitter that uses HuggingFace tokenizer to count length. classmethod from_language(language: Language, **kwargs: Any) → RecursiveCharacterTextSplitter¶ classmethod from_tiktoken_encoder(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any) → TS¶ Text splitter that uses tiktoken encoder to count length. static get_separators_for_language(language: Language) → List[str]¶ split_documents(documents: Iterable[Document]) → List[Document]¶ Split documents. split_text(text: str) → List[str]¶ Split text into multiple components. transform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶ Transform sequence of documents by splitting them.
Attempts to split the text along Markdown-formatted headings.
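A minimal usage sketch with made-up Markdown:

from langchain.text_splitter import MarkdownTextSplitter

md = "# Title\n\nIntro paragraph.\n\n## Section\n\nSection body text."
splitter = MarkdownTextSplitter(chunk_size=40, chunk_overlap=0)
docs = splitter.create_documents([md])  # splits preferentially at headings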
c40de1a7-eb45-4ab2-9e49-df64d63a708f
[ "__future__.annotations", "copy", "logging", "re", "abc.ABC", "abc.abstractmethod", "dataclasses.dataclass", "enum.Enum", "typing.AbstractSet", "typing.Any", "typing.Callable", "typing.Collection", "typing.Dict", "typing.Iterable", "typing.List", "typing.Literal", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.TypedDict", "typing.TypeVar", "typing.Union", "typing.cast", "langchain.docstore.document.Document", "langchain.schema.BaseDocumentTransformer", "spacy", "spacy.lang.en.English", "transformers.PreTrainedTokenizerBase", "tiktoken", "tiktoken", "sentence_transformers.SentenceTransformer", "nltk.tokenize.sent_tokenize" ]
langchain.text_splitter.LatexTextSplitter
Class
https://api.python.langchain.com/en/latest/text_splitter/langchain.text_splitter.LatexTextSplitter.html#langchain.text_splitter.LatexTextSplitter
class LatexTextSplitter(RecursiveCharacterTextSplitter): """Attempts to split the text along Latex-formatted layout elements.""" def __init__(self, **kwargs: Any) -> None: """Initialize a LatexTextSplitter.""" separators = self.get_separators_for_language(Language.LATEX) super().__init__(separators=separators, **kwargs)
langchain.text_splitter.LatexTextSplitter¶ class langchain.text_splitter.LatexTextSplitter(**kwargs: Any)[source]¶ Bases: RecursiveCharacterTextSplitter Attempts to split the text along Latex-formatted layout elements. Initialize a LatexTextSplitter. Methods __init__(**kwargs) Initialize a LatexTextSplitter. atransform_documents(documents, **kwargs) Asynchronously transform a sequence of documents by splitting them. create_documents(texts[, metadatas]) Create documents from a list of texts. from_huggingface_tokenizer(tokenizer, **kwargs) Text splitter that uses HuggingFace tokenizer to count length. from_language(language, **kwargs) from_tiktoken_encoder([encoding_name, ...]) Text splitter that uses tiktoken encoder to count length. get_separators_for_language(language) split_documents(documents) Split documents. split_text(text) Split text into multiple components. transform_documents(documents, **kwargs) Transform sequence of documents by splitting them. async atransform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶ Asynchronously transform a sequence of documents by splitting them. create_documents(texts: List[str], metadatas: Optional[List[dict]] = None) → List[Document]¶ Create documents from a list of texts. classmethod from_huggingface_tokenizer(tokenizer: Any, **kwargs: Any) → TextSplitter¶ Text splitter that uses HuggingFace tokenizer to count length. classmethod from_language(language: Language, **kwargs: Any) → RecursiveCharacterTextSplitter¶ classmethod from_tiktoken_encoder(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any) → TS¶ Text splitter that uses tiktoken encoder to count length. static get_separators_for_language(language: Language) → List[str]¶ split_documents(documents: Iterable[Document]) → List[Document]¶ Split documents. split_text(text: str) → List[str]¶ Split text into multiple components. transform_documents(documents: Sequence[Document], **kwargs: Any) → Sequence[Document]¶ Transform sequence of documents by splitting them.
Attempts to split the text along Latex-formatted layout elements.
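A minimal usage sketch with made-up LaTeX; the small chunk size is only there to force a split at the section boundaries:

from langchain.text_splitter import LatexTextSplitter

tex = "\\section{Intro}\nSome opening text.\n\\subsection{Details}\nMore text."
splitter = LatexTextSplitter(chunk_size=40, chunk_overlap=0)
chunks = splitter.split_text(tex)  # splits preferentially at sections/environments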
ae00547c-2d3e-4dc1-8e16-439411e7b03c
[ "__future__.annotations", "hashlib", "inspect", "json", "logging", "abc.ABC", "abc.abstractmethod", "datetime.timedelta", "typing.TYPE_CHECKING", "typing.Any", "typing.Callable", "typing.Dict", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.Union", "typing.cast", "sqlalchemy.Column", "sqlalchemy.Integer", "sqlalchemy.String", "sqlalchemy.create_engine", "sqlalchemy.select", "sqlalchemy.engine.base.Engine", "sqlalchemy.orm.Session", "langchain.utils.get_from_env", "sqlalchemy.orm.declarative_base", "sqlalchemy.ext.declarative.declarative_base", "langchain.embeddings.base.Embeddings", "langchain.load.dump.dumps", "langchain.load.load.loads", "langchain.schema.Generation", "langchain.vectorstores.redis.Redis", "momento" ]
langchain.cache.BaseCache
Class
https://api.python.langchain.com/en/latest/cache/langchain.cache.BaseCache.html#langchain.cache.BaseCache
class BaseCache(ABC): """Base interface for cache.""" @abstractmethod def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" @abstractmethod def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" @abstractmethod def clear(self, **kwargs: Any) -> None: """Clear cache that can take additional keyword arguments."""
langchain.cache.BaseCache¶ class langchain.cache.BaseCache[source]¶ Bases: ABC Base interface for cache. Methods __init__() clear(**kwargs) Clear cache that can take additional keyword arguments. lookup(prompt, llm_string) Look up based on prompt and llm_string. update(prompt, llm_string, return_val) Update cache based on prompt and llm_string. abstract clear(**kwargs: Any) → None[source]¶ Clear cache that can take additional keyword arguments. abstract lookup(prompt: str, llm_string: str) → Optional[Sequence[Generation]][source]¶ Look up based on prompt and llm_string. abstract update(prompt: str, llm_string: str, return_val: Sequence[Generation]) → None[source]¶ Update cache based on prompt and llm_string.
Base interface for cache.
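To illustrate the contract, a toy dict-backed subclass (InMemoryCache below is the real library equivalent; this sketch is not part of the library):

from typing import Any, Dict, Optional, Sequence, Tuple
from langchain.cache import BaseCache
from langchain.schema import Generation

class DictCache(BaseCache):
    """Hypothetical minimal cache keyed on (prompt, llm_string)."""

    def __init__(self) -> None:
        self._data: Dict[Tuple[str, str], Sequence[Generation]] = {}

    def lookup(self, prompt: str, llm_string: str) -> Optional[Sequence[Generation]]:
        return self._data.get((prompt, llm_string))

    def update(self, prompt: str, llm_string: str, return_val: Sequence[Generation]) -> None:
        self._data[(prompt, llm_string)] = return_val

    def clear(self, **kwargs: Any) -> None:
        self._data.clear()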
233b1ceb-199c-4ca3-b1ef-b1c96d656153
[ "__future__.annotations", "hashlib", "inspect", "json", "logging", "abc.ABC", "abc.abstractmethod", "datetime.timedelta", "typing.TYPE_CHECKING", "typing.Any", "typing.Callable", "typing.Dict", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.Union", "typing.cast", "sqlalchemy.Column", "sqlalchemy.Integer", "sqlalchemy.String", "sqlalchemy.create_engine", "sqlalchemy.select", "sqlalchemy.engine.base.Engine", "sqlalchemy.orm.Session", "langchain.utils.get_from_env", "sqlalchemy.orm.declarative_base", "sqlalchemy.ext.declarative.declarative_base", "langchain.embeddings.base.Embeddings", "langchain.load.dump.dumps", "langchain.load.load.loads", "langchain.schema.Generation", "langchain.vectorstores.redis.Redis", "momento" ]
langchain.cache.InMemoryCache
Class
https://api.python.langchain.com/en/latest/cache/langchain.cache.InMemoryCache.html#langchain.cache.InMemoryCache
class InMemoryCache(BaseCache): """Cache that stores things in memory.""" def __init__(self) -> None: """Initialize with empty cache.""" self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {} def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" return self._cache.get((prompt, llm_string), None) def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" self._cache[(prompt, llm_string)] = return_val def clear(self, **kwargs: Any) -> None: """Clear cache.""" self._cache = {}
langchain.cache.InMemoryCache¶ class langchain.cache.InMemoryCache[source]¶ Bases: BaseCache Cache that stores things in memory. Initialize with empty cache. Methods __init__() Initialize with empty cache. clear(**kwargs) Clear cache. lookup(prompt, llm_string) Look up based on prompt and llm_string. update(prompt, llm_string, return_val) Update cache based on prompt and llm_string. clear(**kwargs: Any) → None[source]¶ Clear cache. lookup(prompt: str, llm_string: str) → Optional[Sequence[Generation]][source]¶ Look up based on prompt and llm_string. update(prompt: str, llm_string: str, return_val: Sequence[Generation]) → None[source]¶ Update cache based on prompt and llm_string.
Cache that stores things in memory.
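A minimal usage sketch; assigning to langchain.llm_cache enables caching process-wide:

import langchain
from langchain.cache import InMemoryCache

langchain.llm_cache = InMemoryCache()
# Subsequent identical LLM calls (same prompt and model settings) are served
# from memory instead of hitting the provider again.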
a699fef6-3375-4428-a077-6bb3c18d7237
[ "__future__.annotations", "hashlib", "inspect", "json", "logging", "abc.ABC", "abc.abstractmethod", "datetime.timedelta", "typing.TYPE_CHECKING", "typing.Any", "typing.Callable", "typing.Dict", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.Union", "typing.cast", "sqlalchemy.Column", "sqlalchemy.Integer", "sqlalchemy.String", "sqlalchemy.create_engine", "sqlalchemy.select", "sqlalchemy.engine.base.Engine", "sqlalchemy.orm.Session", "langchain.utils.get_from_env", "sqlalchemy.orm.declarative_base", "sqlalchemy.ext.declarative.declarative_base", "langchain.embeddings.base.Embeddings", "langchain.load.dump.dumps", "langchain.load.load.loads", "langchain.schema.Generation", "langchain.vectorstores.redis.Redis", "momento" ]
langchain.cache.FullLLMCache
Class
https://api.python.langchain.com/en/latest/cache/langchain.cache.FullLLMCache.html#langchain.cache.FullLLMCache
class FullLLMCache(Base): # type: ignore """SQLite table for full LLM Cache (all generations).""" __tablename__ = "full_llm_cache" prompt = Column(String, primary_key=True) llm = Column(String, primary_key=True) idx = Column(Integer, primary_key=True) response = Column(String)
langchain.cache.FullLLMCache¶ class langchain.cache.FullLLMCache(**kwargs)[source]¶ Bases: Base SQLite table for full LLM Cache (all generations). A simple constructor that allows initialization from kwargs. Sets attributes on the constructed instance using the names and values in kwargs. Only keys that are present as attributes of the instance’s class are allowed. These could be, for example, any mapped columns or relationships. Methods __init__(**kwargs) A simple constructor that allows initialization from kwargs. Attributes idx llm metadata prompt registry response idx¶ llm¶ metadata: MetaData = MetaData()¶ prompt¶ registry: RegistryType = <sqlalchemy.orm.decl_api.registry object>¶ response¶
SQLite table for full LLM Cache (all generations).
d989c718-2836-4294-8f5a-95ffe8698550
[ "__future__.annotations", "hashlib", "inspect", "json", "logging", "abc.ABC", "abc.abstractmethod", "datetime.timedelta", "typing.TYPE_CHECKING", "typing.Any", "typing.Callable", "typing.Dict", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.Union", "typing.cast", "sqlalchemy.Column", "sqlalchemy.Integer", "sqlalchemy.String", "sqlalchemy.create_engine", "sqlalchemy.select", "sqlalchemy.engine.base.Engine", "sqlalchemy.orm.Session", "langchain.utils.get_from_env", "sqlalchemy.orm.declarative_base", "sqlalchemy.ext.declarative.declarative_base", "langchain.embeddings.base.Embeddings", "langchain.load.dump.dumps", "langchain.load.load.loads", "langchain.schema.Generation", "langchain.vectorstores.redis.Redis", "momento" ]
langchain.cache.SQLAlchemyCache
Class
https://api.python.langchain.com/en/latest/cache/langchain.cache.SQLAlchemyCache.html#langchain.cache.SQLAlchemyCache
class SQLAlchemyCache(BaseCache): """Cache that uses SQLAlchemy as a backend.""" def __init__(self, engine: Engine, cache_schema: Type[FullLLMCache] = FullLLMCache): """Initialize by creating all tables.""" self.engine = engine self.cache_schema = cache_schema self.cache_schema.metadata.create_all(self.engine) def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" stmt = ( select(self.cache_schema.response) .where(self.cache_schema.prompt == prompt) # type: ignore .where(self.cache_schema.llm == llm_string) .order_by(self.cache_schema.idx) ) with Session(self.engine) as session: rows = session.execute(stmt).fetchall() if rows: try: return [loads(row[0]) for row in rows] except Exception: logger.warning( "Retrieving a cache value that could not be deserialized " "properly. This is likely due to the cache being in an " "older format. Please recreate your cache to avoid this " "error." ) # In a previous life we stored the raw text directly # in the table, so assume it's in that format. return [Generation(text=row[0]) for row in rows] return None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update based on prompt and llm_string.""" items = [ self.cache_schema(prompt=prompt, llm=llm_string, response=dumps(gen), idx=i) for i, gen in enumerate(return_val) ] with Session(self.engine) as session, session.begin(): for item in items: session.merge(item) def clear(self, **kwargs: Any) -> None: """Clear cache.""" with Session(self.engine) as session: session.query(self.cache_schema).delete()
langchain.cache.SQLAlchemyCache¶ class langchain.cache.SQLAlchemyCache(engine: ~sqlalchemy.engine.base.Engine, cache_schema: ~typing.Type[~langchain.cache.FullLLMCache] = <class 'langchain.cache.FullLLMCache'>)[source]¶ Bases: BaseCache Cache that uses SQLAlchemy as a backend. Initialize by creating all tables. Methods __init__(engine[, cache_schema]) Initialize by creating all tables. clear(**kwargs) Clear cache. lookup(prompt, llm_string) Look up based on prompt and llm_string. update(prompt, llm_string, return_val) Update based on prompt and llm_string. clear(**kwargs: Any) → None[source]¶ Clear cache. lookup(prompt: str, llm_string: str) → Optional[Sequence[Generation]][source]¶ Look up based on prompt and llm_string. update(prompt: str, llm_string: str, return_val: Sequence[Generation]) → None[source]¶ Update based on prompt and llm_string.
Cache that uses SQLAlchemy as a backend.
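A minimal usage sketch; the connection URL below is a placeholder for any SQLAlchemy-supported database:

import langchain
from sqlalchemy import create_engine
from langchain.cache import SQLAlchemyCache

engine = create_engine("postgresql://user:password@localhost:5432/llm_cache")
langchain.llm_cache = SQLAlchemyCache(engine)  # creates the cache table if needed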
7770187f-c498-4f46-8dec-a1c5a41311ce
[ "__future__.annotations", "hashlib", "inspect", "json", "logging", "abc.ABC", "abc.abstractmethod", "datetime.timedelta", "typing.TYPE_CHECKING", "typing.Any", "typing.Callable", "typing.Dict", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.Union", "typing.cast", "sqlalchemy.Column", "sqlalchemy.Integer", "sqlalchemy.String", "sqlalchemy.create_engine", "sqlalchemy.select", "sqlalchemy.engine.base.Engine", "sqlalchemy.orm.Session", "langchain.utils.get_from_env", "sqlalchemy.orm.declarative_base", "sqlalchemy.ext.declarative.declarative_base", "langchain.embeddings.base.Embeddings", "langchain.load.dump.dumps", "langchain.load.load.loads", "langchain.schema.Generation", "langchain.vectorstores.redis.Redis", "momento" ]
langchain.cache.SQLiteCache
Class
https://api.python.langchain.com/en/latest/cache/langchain.cache.SQLiteCache.html#langchain.cache.SQLiteCache
class SQLiteCache(SQLAlchemyCache): """Cache that uses SQLite as a backend.""" def __init__(self, database_path: str = ".langchain.db"): """Initialize by creating the engine and all tables.""" engine = create_engine(f"sqlite:///{database_path}") super().__init__(engine)
langchain.cache.SQLiteCache¶ class langchain.cache.SQLiteCache(database_path: str = '.langchain.db')[source]¶ Bases: SQLAlchemyCache Cache that uses SQLite as a backend. Initialize by creating the engine and all tables. Methods __init__([database_path]) Initialize by creating the engine and all tables. clear(**kwargs) Clear cache. lookup(prompt, llm_string) Look up based on prompt and llm_string. update(prompt, llm_string, return_val) Update based on prompt and llm_string. clear(**kwargs: Any) → None¶ Clear cache. lookup(prompt: str, llm_string: str) → Optional[Sequence[Generation]]¶ Look up based on prompt and llm_string. update(prompt: str, llm_string: str, return_val: Sequence[Generation]) → None¶ Update based on prompt and llm_string.
Cache that uses SQLite as a backend.
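A minimal usage sketch using the default database path:

import langchain
from langchain.cache import SQLiteCache

langchain.llm_cache = SQLiteCache(database_path=".langchain.db")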
7a135545-836a-4311-8ce2-101da505342d
[ "__future__.annotations", "hashlib", "inspect", "json", "logging", "abc.ABC", "abc.abstractmethod", "datetime.timedelta", "typing.TYPE_CHECKING", "typing.Any", "typing.Callable", "typing.Dict", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.Union", "typing.cast", "sqlalchemy.Column", "sqlalchemy.Integer", "sqlalchemy.String", "sqlalchemy.create_engine", "sqlalchemy.select", "sqlalchemy.engine.base.Engine", "sqlalchemy.orm.Session", "langchain.utils.get_from_env", "sqlalchemy.orm.declarative_base", "sqlalchemy.ext.declarative.declarative_base", "langchain.embeddings.base.Embeddings", "langchain.load.dump.dumps", "langchain.load.load.loads", "langchain.schema.Generation", "langchain.vectorstores.redis.Redis", "momento" ]
langchain.cache.RedisCache
Class
https://api.python.langchain.com/en/latest/cache/langchain.cache.RedisCache.html#langchain.cache.RedisCache
class RedisCache(BaseCache): """Cache that uses Redis as a backend.""" # TODO - implement a TTL policy in Redis def __init__(self, redis_: Any): """Initialize by passing in Redis instance.""" try: from redis import Redis except ImportError: raise ValueError( "Could not import redis python package. " "Please install it with `pip install redis`." ) if not isinstance(redis_, Redis): raise ValueError("Please pass in Redis object.") self.redis = redis_ def _key(self, prompt: str, llm_string: str) -> str: """Compute key from prompt and llm_string""" return _hash(prompt + llm_string) def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: """Look up based on prompt and llm_string.""" generations = [] # Read from a Redis HASH results = self.redis.hgetall(self._key(prompt, llm_string)) if results: for _, text in results.items(): generations.append(Generation(text=text)) return generations if generations else None def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: """Update cache based on prompt and llm_string.""" for gen in return_val: if not isinstance(gen, Generation): raise ValueError( "RedisCache only supports caching of normal LLM generations, " f"got {type(gen)}" ) # Write to a Redis HASH key = self._key(prompt, llm_string) self.redis.hset( key, mapping={ str(idx): generation.text for idx, generation in enumerate(return_val) }, ) def clear(self, **kwargs: Any) -> None: """Clear cache. If `asynchronous` is True, flush asynchronously.""" asynchronous = kwargs.get("asynchronous", False) self.redis.flushdb(asynchronous=asynchronous, **kwargs)
langchain.cache.RedisCache¶ class langchain.cache.RedisCache(redis_: Any)[source]¶ Bases: BaseCache Cache that uses Redis as a backend. Initialize by passing in Redis instance. Methods __init__(redis_) Initialize by passing in Redis instance. clear(**kwargs) Clear cache. lookup(prompt, llm_string) Look up based on prompt and llm_string. update(prompt, llm_string, return_val) Update cache based on prompt and llm_string. clear(**kwargs: Any) → None[source]¶ Clear cache. If asynchronous is True, flush asynchronously. lookup(prompt: str, llm_string: str) → Optional[Sequence[Generation]][source]¶ Look up based on prompt and llm_string. update(prompt: str, llm_string: str, return_val: Sequence[Generation]) → None[source]¶ Update cache based on prompt and llm_string.
Cache that uses Redis as a backend.
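A minimal usage sketch, assuming a Redis server is reachable on localhost:6379:

import langchain
from redis import Redis
from langchain.cache import RedisCache

langchain.llm_cache = RedisCache(redis_=Redis(host="localhost", port=6379))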
d998ae8b-ce2b-4762-a0db-4f88c9ccd61f
[ "__future__.annotations", "hashlib", "inspect", "json", "logging", "abc.ABC", "abc.abstractmethod", "datetime.timedelta", "typing.TYPE_CHECKING", "typing.Any", "typing.Callable", "typing.Dict", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.Union", "typing.cast", "sqlalchemy.Column", "sqlalchemy.Integer", "sqlalchemy.String", "sqlalchemy.create_engine", "sqlalchemy.select", "sqlalchemy.engine.base.Engine", "sqlalchemy.orm.Session", "langchain.utils.get_from_env", "sqlalchemy.orm.declarative_base", "sqlalchemy.ext.declarative.declarative_base", "langchain.embeddings.base.Embeddings", "langchain.load.dump.dumps", "langchain.load.load.loads", "langchain.schema.Generation", "langchain.vectorstores.redis.Redis", "momento", "redis.Redis" ]
langchain.cache.RedisSemanticCache
Class
https://api.python.langchain.com/en/latest/cache/langchain.cache.RedisSemanticCache.html#langchain.cache.RedisSemanticCache
class RedisSemanticCache(BaseCache):
    """Cache that uses Redis as a vector-store backend."""

    # TODO - implement a TTL policy in Redis

    def __init__(
        self, redis_url: str, embedding: Embeddings, score_threshold: float = 0.2
    ):
        """Initialize the semantic cache backed by a Redis vector store.

        Args:
            redis_url (str): URL to connect to Redis.
            embedding (Embedding): Embedding provider for semantic encoding
                and search.
            score_threshold (float, optional): Similarity score threshold used
                when searching the vector store for a cached prompt.
                Defaults to 0.2.

        Example:

        .. code-block:: python

            import langchain

            from langchain.cache import RedisSemanticCache
            from langchain.embeddings import OpenAIEmbeddings

            langchain.llm_cache = RedisSemanticCache(
                redis_url="redis://localhost:6379",
                embedding=OpenAIEmbeddings()
            )

        """
        self._cache_dict: Dict[str, RedisVectorstore] = {}
        self.redis_url = redis_url
        self.embedding = embedding
        self.score_threshold = score_threshold

    def _index_name(self, llm_string: str) -> str:
        hashed_index = _hash(llm_string)
        return f"cache:{hashed_index}"

    def _get_llm_cache(self, llm_string: str) -> RedisVectorstore:
        index_name = self._index_name(llm_string)
        # return vectorstore client for the specific llm string
        if index_name in self._cache_dict:
            return self._cache_dict[index_name]
        # create new vectorstore client for the specific llm string
        try:
            self._cache_dict[index_name] = RedisVectorstore.from_existing_index(
                embedding=self.embedding,
                index_name=index_name,
                redis_url=self.redis_url,
            )
        except ValueError:
            redis = RedisVectorstore(
                embedding_function=self.embedding.embed_query,
                index_name=index_name,
                redis_url=self.redis_url,
            )
            _embedding = self.embedding.embed_query(text="test")
            redis._create_index(dim=len(_embedding))
            self._cache_dict[index_name] = redis
        return self._cache_dict[index_name]

    def clear(self, **kwargs: Any) -> None:
        """Clear semantic cache for a given llm_string."""
        index_name = self._index_name(kwargs["llm_string"])
        if index_name in self._cache_dict:
            self._cache_dict[index_name].drop_index(
                index_name=index_name, delete_documents=True, redis_url=self.redis_url
            )
            del self._cache_dict[index_name]

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string."""
        llm_cache = self._get_llm_cache(llm_string)
        generations = []
        # Search the vector store for prompts semantically similar to this one
        results = llm_cache.similarity_search_limit_score(
            query=prompt,
            k=1,
            score_threshold=self.score_threshold,
        )
        if results:
            for document in results:
                for text in document.metadata["return_val"]:
                    generations.append(Generation(text=text))
        return generations if generations else None

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache based on prompt and llm_string."""
        for gen in return_val:
            if not isinstance(gen, Generation):
                raise ValueError(
                    "RedisSemanticCache only supports caching of "
                    f"normal LLM generations, got {type(gen)}"
                )
        llm_cache = self._get_llm_cache(llm_string)
        # Write to vectorstore
        metadata = {
            "llm_string": llm_string,
            "prompt": prompt,
            "return_val": [generation.text for generation in return_val],
        }
        llm_cache.add_texts(texts=[prompt], metadatas=[metadata])
langchain.cache.RedisSemanticCache¶ class langchain.cache.RedisSemanticCache(redis_url: str, embedding: Embeddings, score_threshold: float = 0.2)[source]¶ Bases: BaseCache Cache that uses Redis as a vector-store backend. Initialize the semantic cache backed by a Redis vector store. Parameters redis_url (str) – URL to connect to Redis. embedding (Embedding) – Embedding provider for semantic encoding and search. score_threshold (float, optional) – Similarity score threshold used when searching the vector store for a cached prompt. Defaults to 0.2. Example: import langchain from langchain.cache import RedisSemanticCache from langchain.embeddings import OpenAIEmbeddings langchain.llm_cache = RedisSemanticCache( redis_url="redis://localhost:6379", embedding=OpenAIEmbeddings() ) Methods __init__(redis_url, embedding[, score_threshold]) Initialize the semantic cache backed by a Redis vector store. clear(**kwargs) Clear semantic cache for a given llm_string. lookup(prompt, llm_string) Look up based on prompt and llm_string. update(prompt, llm_string, return_val) Update cache based on prompt and llm_string. clear(**kwargs: Any) → None[source]¶ Clear semantic cache for a given llm_string. lookup(prompt: str, llm_string: str) → Optional[Sequence[Generation]][source]¶ Look up based on prompt and llm_string. update(prompt: str, llm_string: str, return_val: Sequence[Generation]) → None[source]¶ Update cache based on prompt and llm_string.
Cache that uses Redis as a vector-store backend.
5a39ff29-1be4-4797-841b-90234c4b2892
[ "__future__.annotations", "hashlib", "inspect", "json", "logging", "abc.ABC", "abc.abstractmethod", "datetime.timedelta", "typing.TYPE_CHECKING", "typing.Any", "typing.Callable", "typing.Dict", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.Union", "typing.cast", "sqlalchemy.Column", "sqlalchemy.Integer", "sqlalchemy.String", "sqlalchemy.create_engine", "sqlalchemy.select", "sqlalchemy.engine.base.Engine", "sqlalchemy.orm.Session", "langchain.utils.get_from_env", "sqlalchemy.orm.declarative_base", "sqlalchemy.ext.declarative.declarative_base", "langchain.embeddings.base.Embeddings", "langchain.load.dump.dumps", "langchain.load.load.loads", "langchain.schema.Generation", "langchain.vectorstores.redis.Redis", "momento", "redis.Redis" ]
langchain.cache.GPTCache
Class
https://api.python.langchain.com/en/latest/cache/langchain.cache.GPTCache.html#langchain.cache.GPTCache
class GPTCache(BaseCache):
    """Cache that uses GPTCache as a backend."""

    def __init__(
        self,
        init_func: Union[
            Callable[[Any, str], None], Callable[[Any], None], None
        ] = None,
    ):
        """Initialize by passing in init function (default: `None`).

        Args:
            init_func (Optional[Callable[[Any], None]]): init `GPTCache` function
            (default: `None`)

        Example:
        .. code-block:: python

            # Initialize GPTCache with a custom init function
            import gptcache
            from gptcache.processor.pre import get_prompt
            from gptcache.manager.factory import manager_factory

            # Avoid multiple caches using the same file,
            # causing different llm model caches to affect each other

            def init_gptcache(cache_obj: gptcache.Cache, llm: str):
                cache_obj.init(
                    pre_embedding_func=get_prompt,
                    data_manager=manager_factory(
                        manager="map",
                        data_dir=f"map_cache_{llm}"
                    ),
                )

            langchain.llm_cache = GPTCache(init_gptcache)

        """
        try:
            import gptcache  # noqa: F401
        except ImportError:
            raise ImportError(
                "Could not import gptcache python package. "
                "Please install it with `pip install gptcache`."
            )

        self.init_gptcache_func: Union[
            Callable[[Any, str], None], Callable[[Any], None], None
        ] = init_func
        self.gptcache_dict: Dict[str, Any] = {}

    def _new_gptcache(self, llm_string: str) -> Any:
        """New gptcache object"""
        from gptcache import Cache
        from gptcache.manager.factory import get_data_manager
        from gptcache.processor.pre import get_prompt

        _gptcache = Cache()
        if self.init_gptcache_func is not None:
            sig = inspect.signature(self.init_gptcache_func)
            if len(sig.parameters) == 2:
                self.init_gptcache_func(_gptcache, llm_string)  # type: ignore[call-arg]
            else:
                self.init_gptcache_func(_gptcache)  # type: ignore[call-arg]
        else:
            _gptcache.init(
                pre_embedding_func=get_prompt,
                data_manager=get_data_manager(data_path=llm_string),
            )

        self.gptcache_dict[llm_string] = _gptcache
        return _gptcache

    def _get_gptcache(self, llm_string: str) -> Any:
        """Get a cache object.

        When the corresponding llm model cache does not exist, it will be created."""
        _gptcache = self.gptcache_dict.get(llm_string)
        if _gptcache is None:
            # Create lazily: `dict.get` with a call as its default would invoke
            # _new_gptcache eagerly on every lookup and re-initialize the cache.
            _gptcache = self._new_gptcache(llm_string)
        return _gptcache

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up the cache data.
        First, retrieve the corresponding cache object using the `llm_string` parameter,
        and then retrieve the data from the cache based on the `prompt`.
        """
        from gptcache.adapter.api import get

        _gptcache = self.gptcache_dict.get(llm_string, None)
        if _gptcache is None:
            return None
        res = get(prompt, cache_obj=_gptcache)
        if res:
            return [
                Generation(**generation_dict) for generation_dict in json.loads(res)
            ]
        return None

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache.
        First, retrieve the corresponding cache object using the `llm_string` parameter,
        and then store the `prompt` and `return_val` in the cache object.
        """
        for gen in return_val:
            if not isinstance(gen, Generation):
                raise ValueError(
                    "GPTCache only supports caching of normal LLM generations, "
                    f"got {type(gen)}"
                )
        from gptcache.adapter.api import put

        _gptcache = self._get_gptcache(llm_string)
        handled_data = json.dumps([generation.dict() for generation in return_val])
        put(prompt, handled_data, cache_obj=_gptcache)
        return None

    def clear(self, **kwargs: Any) -> None:
        """Clear cache."""
        from gptcache import Cache

        for gptcache_instance in self.gptcache_dict.values():
            gptcache_instance = cast(Cache, gptcache_instance)
            gptcache_instance.flush()

        self.gptcache_dict.clear()
langchain.cache.GPTCache¶ class langchain.cache.GPTCache(init_func: Optional[Union[Callable[[Any, str], None], Callable[[Any], None]]] = None)[source]¶ Bases: BaseCache Cache that uses GPTCache as a backend. Initialize by passing in init function (default: None). Parameters init_func (Optional[Callable[[Any], None]]) – init GPTCache function (default – None) Example: .. code-block:: python # Initialize GPTCache with a custom init function import gptcache from gptcache.processor.pre import get_prompt from gptcache.manager.factory import manager_factory # Avoid multiple caches using the same file, causing different llm model caches to affect each other def init_gptcache(cache_obj: gptcache.Cache, llm: str): cache_obj.init(pre_embedding_func=get_prompt, data_manager=manager_factory( manager="map", data_dir=f"map_cache_{llm}" ), ) langchain.llm_cache = GPTCache(init_gptcache) Methods __init__([init_func]) Initialize by passing in init function (default: None). clear(**kwargs) Clear cache. lookup(prompt, llm_string) Look up the cache data. update(prompt, llm_string, return_val) Update cache. clear(**kwargs: Any) → None[source]¶ Clear cache. lookup(prompt: str, llm_string: str) → Optional[Sequence[Generation]][source]¶ Look up the cache data. First, retrieve the corresponding cache object using the llm_string parameter, and then retrieve the data from the cache based on the prompt. update(prompt: str, llm_string: str, return_val: Sequence[Generation]) → None[source]¶ Update cache. First, retrieve the corresponding cache object using the llm_string parameter, and then store the prompt and return_val in the cache object.
Cache that uses GPTCache as a backend.
88b41bfa-17d4-48a9-a32a-69b0586084b8
[ "__future__.annotations", "hashlib", "inspect", "json", "logging", "abc.ABC", "abc.abstractmethod", "datetime.timedelta", "typing.TYPE_CHECKING", "typing.Any", "typing.Callable", "typing.Dict", "typing.Optional", "typing.Sequence", "typing.Tuple", "typing.Type", "typing.Union", "typing.cast", "sqlalchemy.Column", "sqlalchemy.Integer", "sqlalchemy.String", "sqlalchemy.create_engine", "sqlalchemy.select", "sqlalchemy.engine.base.Engine", "sqlalchemy.orm.Session", "langchain.utils.get_from_env", "sqlalchemy.orm.declarative_base", "sqlalchemy.ext.declarative.declarative_base", "langchain.embeddings.base.Embeddings", "langchain.load.dump.dumps", "langchain.load.load.loads", "langchain.schema.Generation", "langchain.vectorstores.redis.Redis", "momento", "redis.Redis", "gptcache", "gptcache.Cache", "gptcache.manager.factory.get_data_manager", "gptcache.processor.pre.get_prompt", "gptcache.adapter.api.get", "gptcache.adapter.api.put", "gptcache.Cache", "momento.responses.CreateCache" ]
langchain.cache.MomentoCache
Class
https://api.python.langchain.com/en/latest/cache/langchain.cache.MomentoCache.html#langchain.cache.MomentoCache
class MomentoCache(BaseCache):
    """Cache that uses Momento as a backend. See https://gomomento.com/"""

    def __init__(
        self,
        cache_client: momento.CacheClient,
        cache_name: str,
        *,
        ttl: Optional[timedelta] = None,
        ensure_cache_exists: bool = True,
    ):
        """Instantiate a prompt cache using Momento as a backend.

        Note: to instantiate the cache client passed to MomentoCache,
        you must have a Momento account. See https://gomomento.com/.

        Args:
            cache_client (CacheClient): The Momento cache client.
            cache_name (str): The name of the cache to use to store the data.
            ttl (Optional[timedelta], optional): The time to live for the cache items.
                Defaults to None, ie use the client default TTL.
            ensure_cache_exists (bool, optional): Create the cache if it doesn't
                exist. Defaults to True.

        Raises:
            ImportError: Momento python package is not installed.
            TypeError: cache_client is not of type momento.CacheClientObject
            ValueError: ttl is non-null and non-positive
        """
        try:
            from momento import CacheClient
        except ImportError:
            raise ImportError(
                "Could not import momento python package. "
                "Please install it with `pip install momento`."
            )
        if not isinstance(cache_client, CacheClient):
            raise TypeError("cache_client must be a momento.CacheClient object.")
        _validate_ttl(ttl)
        if ensure_cache_exists:
            _ensure_cache_exists(cache_client, cache_name)

        self.cache_client = cache_client
        self.cache_name = cache_name
        self.ttl = ttl

    @classmethod
    def from_client_params(
        cls,
        cache_name: str,
        ttl: timedelta,
        *,
        configuration: Optional[momento.config.Configuration] = None,
        auth_token: Optional[str] = None,
        **kwargs: Any,
    ) -> MomentoCache:
        """Construct cache from CacheClient parameters."""
        try:
            from momento import CacheClient, Configurations, CredentialProvider
        except ImportError:
            raise ImportError(
                "Could not import momento python package. "
                "Please install it with `pip install momento`."
            )
        if configuration is None:
            configuration = Configurations.Laptop.v1()
        auth_token = auth_token or get_from_env("auth_token", "MOMENTO_AUTH_TOKEN")
        credentials = CredentialProvider.from_string(auth_token)
        cache_client = CacheClient(configuration, credentials, default_ttl=ttl)
        return cls(cache_client, cache_name, ttl=ttl, **kwargs)

    def __key(self, prompt: str, llm_string: str) -> str:
        """Compute cache key from prompt and associated model and settings.

        Args:
            prompt (str): The prompt run through the language model.
            llm_string (str): The language model version and settings.

        Returns:
            str: The cache key.
        """
        return _hash(prompt + llm_string)

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Lookup llm generations in cache by prompt and associated model and settings.

        Args:
            prompt (str): The prompt run through the language model.
            llm_string (str): The language model version and settings.

        Raises:
            SdkException: Momento service or network error

        Returns:
            Optional[RETURN_VAL_TYPE]: A list of language model generations.
        """
        from momento.responses import CacheGet

        generations: RETURN_VAL_TYPE = []

        get_response = self.cache_client.get(
            self.cache_name, self.__key(prompt, llm_string)
        )
        if isinstance(get_response, CacheGet.Hit):
            value = get_response.value_string
            generations = _load_generations_from_json(value)
        elif isinstance(get_response, CacheGet.Miss):
            pass
        elif isinstance(get_response, CacheGet.Error):
            raise get_response.inner_exception
        return generations if generations else None

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Store llm generations in cache.
        Args:
            prompt (str): The prompt run through the language model.
            llm_string (str): The language model string.
            return_val (RETURN_VAL_TYPE): A list of language model generations.

        Raises:
            SdkException: Momento service or network error
            Exception: Unexpected response
        """
        for gen in return_val:
            if not isinstance(gen, Generation):
                raise ValueError(
                    "Momento only supports caching of normal LLM generations, "
                    f"got {type(gen)}"
                )
        key = self.__key(prompt, llm_string)
        value = _dump_generations_to_json(return_val)
        set_response = self.cache_client.set(self.cache_name, key, value, self.ttl)
        from momento.responses import CacheSet

        if isinstance(set_response, CacheSet.Success):
            pass
        elif isinstance(set_response, CacheSet.Error):
            raise set_response.inner_exception
        else:
            raise Exception(f"Unexpected response: {set_response}")

    def clear(self, **kwargs: Any) -> None:
        """Clear the cache.

        Raises:
            SdkException: Momento service or network error
        """
        from momento.responses import CacheFlush

        flush_response = self.cache_client.flush_cache(self.cache_name)
        if isinstance(flush_response, CacheFlush.Success):
            pass
        elif isinstance(flush_response, CacheFlush.Error):
            raise flush_response.inner_exception
langchain.cache.MomentoCache¶ class langchain.cache.MomentoCache(cache_client: momento.CacheClient, cache_name: str, *, ttl: Optional[timedelta] = None, ensure_cache_exists: bool = True)[source]¶ Bases: BaseCache Cache that uses Momento as a backend. See https://gomomento.com/ Instantiate a prompt cache using Momento as a backend. Note: to instantiate the cache client passed to MomentoCache, you must have a Momento account. See https://gomomento.com/. Parameters cache_client (CacheClient) – The Momento cache client. cache_name (str) – The name of the cache to use to store the data. ttl (Optional[timedelta], optional) – The time to live for the cache items. Defaults to None, ie use the client default TTL. ensure_cache_exists (bool, optional) – Create the cache if it doesn’t exist. Defaults to True. Raises ImportError – Momento python package is not installed. TypeError – cache_client is not of type momento.CacheClientObject ValueError – ttl is non-null and non-positive Methods __init__(cache_client, cache_name, *[, ttl, ...]) Instantiate a prompt cache using Momento as a backend. clear(**kwargs) Clear the cache. from_client_params(cache_name, ttl, *[, ...]) Construct cache from CacheClient parameters. lookup(prompt, llm_string) Lookup llm generations in cache by prompt and associated model and settings. update(prompt, llm_string, return_val) Store llm generations in cache. clear(**kwargs: Any) → None[source]¶ Clear the cache. Raises SdkException – Momento service or network error classmethod from_client_params(cache_name: str, ttl: timedelta, *, configuration: Optional[momento.config.Configuration] = None, auth_token: Optional[str] = None, **kwargs: Any) → MomentoCache[source]¶ Construct cache from CacheClient parameters. lookup(prompt: str, llm_string: str) → Optional[Sequence[Generation]][source]¶ Lookup llm generations in cache by prompt and associated model and settings. Parameters prompt (str) – The prompt run through the language model. llm_string (str) – The language model version and settings. Raises SdkException – Momento service or network error Returns A list of language model generations. Return type Optional[RETURN_VAL_TYPE] update(prompt: str, llm_string: str, return_val: Sequence[Generation]) → None[source]¶ Store llm generations in cache. Parameters prompt (str) – The prompt run through the language model. llm_string (str) – The language model string. return_val (RETURN_VAL_TYPE) – A list of language model generations. Raises SdkException – Momento service or network error Exception – Unexpected response
Cache that uses Momento as a backend.
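A minimal usage sketch via the from_client_params constructor; it assumes a Momento account with MOMENTO_AUTH_TOKEN set in the environment, and the cache name is a placeholder:

import langchain
from datetime import timedelta
from langchain.cache import MomentoCache

langchain.llm_cache = MomentoCache.from_client_params(
    cache_name="langchain", ttl=timedelta(days=1)
)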
6ca74280-166f-40c7-943c-7078751916cc
[ "subprocess", "pathlib.Path", "langchainplus_sdk.cli.main.get_docker_compose_command" ]
langchain.server.main
Function
https://api.python.langchain.com/en/latest/server/langchain.server.main.html#langchain.server.main
def main() -> None: """Run the langchain server locally.""" p = Path(__file__).absolute().parent / "docker-compose.yaml" docker_compose_command = get_docker_compose_command() subprocess.run([*docker_compose_command, "-f", str(p), "pull"]) subprocess.run([*docker_compose_command, "-f", str(p), "up"])
langchain.server.main¶ langchain.server.main() → None[source]¶ Run the langchain server locally.
Run the langchain server locally.
43d7b683-7299-45ab-8168-31f08aa0758a
[ "platform", "functools.lru_cache" ]
langchain.env.get_runtime_environment
Function
https://api.python.langchain.com/en/latest/env/langchain.env.get_runtime_environment.html#langchain.env.get_runtime_environment
@lru_cache(maxsize=1) def get_runtime_environment() -> dict: """Get information about the environment.""" # Lazy import to avoid circular imports from langchain import __version__ return { "library_version": __version__, "library": "langchain", "platform": platform.platform(), "runtime": "python", "runtime_version": platform.python_version(), }
langchain.env.get_runtime_environment¶ langchain.env.get_runtime_environment() → dict[source]¶ Get information about the environment.
Get information about the environment.
1f881e12-b2d8-4ad6-bba1-cdf96c32e69e
[ "typing.List", "typing.Optional", "typing.Tuple", "typing.Union", "numpy" ]
langchain.math_utils.cosine_similarity
Function
https://api.python.langchain.com/en/latest/math_utils/langchain.math_utils.cosine_similarity.html#langchain.math_utils.cosine_similarity
def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray: """Row-wise cosine similarity between two equal-width matrices.""" if len(X) == 0 or len(Y) == 0: return np.array([]) X = np.array(X) Y = np.array(Y) if X.shape[1] != Y.shape[1]: raise ValueError( f"Number of columns in X and Y must be the same. X has shape {X.shape} " f"and Y has shape {Y.shape}." ) X_norm = np.linalg.norm(X, axis=1) Y_norm = np.linalg.norm(Y, axis=1) similarity = np.dot(X, Y.T) / np.outer(X_norm, Y_norm) similarity[np.isnan(similarity) | np.isinf(similarity)] = 0.0 return similarity
langchain.math_utils.cosine_similarity¶ langchain.math_utils.cosine_similarity(X: Union[List[List[float]], List[ndarray], ndarray], Y: Union[List[List[float]], List[ndarray], ndarray]) → ndarray[source]¶ Row-wise cosine similarity between two equal-width matrices.
Row-wise cosine similarity between two equal-width matrices.
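A small worked example with hand-checkable values:

from langchain.math_utils import cosine_similarity

X = [[1.0, 0.0], [0.0, 1.0]]
Y = [[1.0, 0.0], [1.0, 1.0]]
sim = cosine_similarity(X, Y)
# sim[0][0] == 1.0    (same direction)
# sim[0][1] ~= 0.7071 (45 degrees apart)
# sim[1][0] == 0.0    (orthogonal)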
87d86254-d69f-4fab-a8fa-f337734d8763
[ "typing.List", "typing.Optional", "typing.Tuple", "typing.Union", "numpy" ]
langchain.math_utils.cosine_similarity_top_k
Function
https://api.python.langchain.com/en/latest/math_utils/langchain.math_utils.cosine_similarity_top_k.html#langchain.math_utils.cosine_similarity_top_k
def cosine_similarity_top_k( X: Matrix, Y: Matrix, top_k: Optional[int] = 5, score_threshold: Optional[float] = None, ) -> Tuple[List[Tuple[int, int]], List[float]]: """Row-wise cosine similarity with optional top-k and score threshold filtering. Args: X: Matrix. Y: Matrix, same width as X. top_k: Max number of results to return. score_threshold: Minimum cosine similarity of results. Returns: Tuple of two lists. First contains two-tuples of indices (X_idx, Y_idx), second contains corresponding cosine similarities. """ if len(X) == 0 or len(Y) == 0: return [], [] score_array = cosine_similarity(X, Y) sorted_idxs = score_array.flatten().argsort()[::-1] top_k = top_k or len(sorted_idxs) top_idxs = sorted_idxs[:top_k] score_threshold = score_threshold or -1.0 top_idxs = top_idxs[score_array.flatten()[top_idxs] > score_threshold] ret_idxs = [(x // score_array.shape[1], x % score_array.shape[1]) for x in top_idxs] scores = score_array.flatten()[top_idxs].tolist() return ret_idxs, scores
langchain.math_utils.cosine_similarity_top_k¶ langchain.math_utils.cosine_similarity_top_k(X: Union[List[List[float]], List[ndarray], ndarray], Y: Union[List[List[float]], List[ndarray], ndarray], top_k: Optional[int] = 5, score_threshold: Optional[float] = None) → Tuple[List[Tuple[int, int]], List[float]][source]¶ Row-wise cosine similarity with optional top-k and score threshold filtering. Parameters X – Matrix. Y – Matrix, same width as X. top_k – Max number of results to return. score_threshold – Minimum cosine similarity of results. Returns Tuple of two lists. First contains two-tuples of indices (X_idx, Y_idx),second contains corresponding cosine similarities.
Row-wise cosine similarity with optional top-k and score threshold filtering.
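A short sketch of the top-k variant with illustrative inputs; note how flattened indices are mapped back to (X row, Y row) pairs:

```python
from langchain.math_utils import cosine_similarity_top_k

X = [[1.0, 0.0], [0.0, 1.0]]
Y = [[1.0, 0.1], [0.1, 1.0]]

# Keep at most two pairs whose similarity exceeds 0.9.
idxs, scores = cosine_similarity_top_k(X, Y, top_k=2, score_threshold=0.9)
print(idxs)    # [(0, 0), (1, 1)] -- (X row index, Y row index)
print(scores)  # [0.995..., 0.995...]
```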
ef36466f-5832-43f4-8f0a-bd76d88a22da
[ "contextlib", "datetime", "importlib", "os", "importlib.metadata.version", "typing.Any", "typing.Callable", "typing.Dict", "typing.List", "typing.Optional", "typing.Tuple", "packaging.version.parse", "requests.HTTPError", "requests.Response" ]
langchain.utils.get_from_dict_or_env
Function
https://api.python.langchain.com/en/latest/utils/langchain.utils.get_from_dict_or_env.html#langchain.utils.get_from_dict_or_env
def get_from_dict_or_env(
    data: Dict[str, Any], key: str, env_key: str, default: Optional[str] = None
) -> str:
    """Get a value from a dictionary or an environment variable."""
    if key in data and data[key]:
        return data[key]
    else:
        return get_from_env(key, env_key, default=default)
langchain.utils.get_from_dict_or_env¶ langchain.utils.get_from_dict_or_env(data: Dict[str, Any], key: str, env_key: str, default: Optional[str] = None) → str[source]¶ Get a value from a dictionary or an environment variable.
Get a value from a dictionary or an environment variable.
42149475-3152-475f-9811-6660adcd1ca3
[ "contextlib", "datetime", "importlib", "os", "importlib.metadata.version", "typing.Any", "typing.Callable", "typing.Dict", "typing.List", "typing.Optional", "typing.Tuple", "packaging.version.parse", "requests.HTTPError", "requests.Response" ]
langchain.utils.get_from_env
Function
https://api.python.langchain.com/en/latest/utils/langchain.utils.get_from_env.html#langchain.utils.get_from_env
def get_from_env(key: str, env_key: str, default: Optional[str] = None) -> str:
    """Get a value from an environment variable, falling back to a default."""
    if env_key in os.environ and os.environ[env_key]:
        return os.environ[env_key]
    elif default is not None:
        return default
    else:
        raise ValueError(
            f"Did not find {key}, please add an environment variable"
            f" `{env_key}` which contains it, or pass"
            f" `{key}` as a named parameter."
        )
langchain.utils.get_from_env¶ langchain.utils.get_from_env(key: str, env_key: str, default: Optional[str] = None) → str[source]¶ Get a value from an environment variable, falling back to a default.
Get a value from an environment variable, falling back to a default.
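A sketch showing how the two lookups compose (the variable and key names here are hypothetical):

```python
import os

from langchain.utils import get_from_dict_or_env, get_from_env

os.environ["MY_API_KEY"] = "secret"  # hypothetical environment variable

# A truthy value in the dict wins over the environment.
print(get_from_dict_or_env({"api_key": "abc"}, "api_key", "MY_API_KEY"))  # abc

# Otherwise the lookup falls through to get_from_env.
print(get_from_dict_or_env({}, "api_key", "MY_API_KEY"))  # secret

# get_from_env raises ValueError if the variable is unset and no default is given.
print(get_from_env("api_key", "MY_API_KEY"))  # secret
```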
5c76a5cc-310f-4d76-9c0a-0d804fd41d78
[ "contextlib", "datetime", "importlib", "os", "importlib.metadata.version", "typing.Any", "typing.Callable", "typing.Dict", "typing.List", "typing.Optional", "typing.Tuple", "packaging.version.parse", "requests.HTTPError", "requests.Response" ]
langchain.utils.xor_args
Function
https://api.python.langchain.com/en/latest/utils/langchain.utils.xor_args.html#langchain.utils.xor_args
def xor_args(*arg_groups: Tuple[str, ...]) -> Callable:
    """Validate specified keyword args are mutually exclusive."""

    def decorator(func: Callable) -> Callable:
        def wrapper(*args: Any, **kwargs: Any) -> Callable:
            """Validate exactly one arg in each group is not None."""
            counts = [
                sum(1 for arg in arg_group if kwargs.get(arg) is not None)
                for arg_group in arg_groups
            ]
            invalid_groups = [i for i, count in enumerate(counts) if count != 1]
            if invalid_groups:
                invalid_group_names = [
                    ", ".join(arg_groups[i]) for i in invalid_groups
                ]
                raise ValueError(
                    "Exactly one argument in each of the following"
                    " groups must be defined:"
                    f" {', '.join(invalid_group_names)}"
                )
            return func(*args, **kwargs)

        return wrapper

    return decorator
langchain.utils.xor_args¶ langchain.utils.xor_args(*arg_groups: Tuple[str, ...]) → Callable[source]¶ Validate specified keyword args are mutually exclusive.
Validate specified keyword args are mutually exclusive.
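A sketch of the decorator in use (the function and argument names are made up for illustration):

```python
from langchain.utils import xor_args

@xor_args(("path", "url"))
def load(path=None, url=None):
    """Load from exactly one source."""
    return path or url

load(path="data.txt")        # ok: exactly one argument in the group is set
# load(path="a", url="b")    # would raise ValueError: both are set
# load()                     # would raise ValueError: neither is set
```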
73e8f649-85d1-42e7-843d-4b49a13bc02e
[ "contextlib", "datetime", "importlib", "os", "importlib.metadata.version", "typing.Any", "typing.Callable", "typing.Dict", "typing.List", "typing.Optional", "typing.Tuple", "packaging.version.parse", "requests.HTTPError", "requests.Response" ]
langchain.utils.raise_for_status_with_text
Function
https://api.python.langchain.com/en/latest/utils/langchain.utils.raise_for_status_with_text.html#langchain.utils.raise_for_status_with_text
def raise_for_status_with_text(response: Response) -> None:
    """Raise an error with the response text."""
    try:
        response.raise_for_status()
    except HTTPError as e:
        raise ValueError(response.text) from e
langchain.utils.raise_for_status_with_text¶ langchain.utils.raise_for_status_with_text(response: Response) → None[source]¶ Raise an error with the response text.
Raise an error with the response text.
b0b03fea-36c1-4b1d-9afa-5512fa7e9826
[ "contextlib", "datetime", "importlib", "os", "importlib.metadata.version", "typing.Any", "typing.Callable", "typing.Dict", "typing.List", "typing.Optional", "typing.Tuple", "packaging.version.parse", "requests.HTTPError", "requests.Response" ]
langchain.utils.stringify_value
Function
https://api.python.langchain.com/en/latest/utils/langchain.utils.stringify_value.html#langchain.utils.stringify_value
def stringify_value(val: Any) -> str:
    """Stringify a value.

    Args:
        val: The value to stringify.

    Returns:
        str: The stringified value.
    """
    if isinstance(val, str):
        return val
    elif isinstance(val, dict):
        return "\n" + stringify_dict(val)
    elif isinstance(val, list):
        return "\n".join(stringify_value(v) for v in val)
    else:
        return str(val)
langchain.utils.stringify_value¶ langchain.utils.stringify_value(val: Any) → str[source]¶ Stringify a value. Parameters val – The value to stringify. Returns The stringified value. Return type str
Stringify a value.
48a288c4-08d4-48cb-b511-afcc7e92c731
[ "contextlib", "datetime", "importlib", "os", "importlib.metadata.version", "typing.Any", "typing.Callable", "typing.Dict", "typing.List", "typing.Optional", "typing.Tuple", "packaging.version.parse", "requests.HTTPError", "requests.Response" ]
langchain.utils.stringify_dict
Function
https://api.python.langchain.com/en/latest/utils/langchain.utils.stringify_dict.html#langchain.utils.stringify_dict
def stringify_dict(data: dict) -> str:
    """Stringify a dictionary.

    Args:
        data: The dictionary to stringify.

    Returns:
        str: The stringified dictionary.
    """
    text = ""
    for key, value in data.items():
        text += key + ": " + stringify_value(value) + "\n"
    return text
langchain.utils.stringify_dict¶ langchain.utils.stringify_dict(data: dict) → str[source]¶ Stringify a dictionary. Parameters data – The dictionary to stringify. Returns The stringified dictionary. Return type str
Stringify a dictionary.
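A sketch of the two stringify helpers together; the nested record is illustrative. Note that list and dict values are expanded onto their own lines:

```python
from langchain.utils import stringify_dict

record = {"name": "doc-1", "tags": ["a", "b"], "meta": {"pages": 3}}
print(stringify_dict(record))
# name: doc-1
# tags: a
# b
# meta:
# pages: 3
```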
055649de-eeb8-435a-8268-ce122ebc1f49
[ "contextlib", "datetime", "importlib", "os", "importlib.metadata.version", "typing.Any", "typing.Callable", "typing.Dict", "typing.List", "typing.Optional", "typing.Tuple", "packaging.version.parse", "requests.HTTPError", "requests.Response" ]
langchain.utils.comma_list
Function
https://api.python.langchain.com/en/latest/utils/langchain.utils.comma_list.html#langchain.utils.comma_list
def comma_list(items: List[Any]) -> str:
    """Convert a list of items to a comma-separated string."""
    return ", ".join(str(item) for item in items)
langchain.utils.comma_list¶ langchain.utils.comma_list(items: List[Any]) → str[source]¶ Convert a list of items to a comma-separated string.
09ca248d-16de-40ae-bb8a-682d3040dc9c
[ "contextlib", "datetime", "importlib", "os", "importlib.metadata.version", "typing.Any", "typing.Callable", "typing.Dict", "typing.List", "typing.Optional", "typing.Tuple", "packaging.version.parse", "requests.HTTPError", "requests.Response" ]
langchain.utils.mock_now
Function
https://api.python.langchain.com/en/latest/utils/langchain.utils.mock_now.html#langchain.utils.mock_now
@contextlib.contextmanager
def mock_now(dt_value):  # type: ignore
    """Context manager for mocking out datetime.now() in unit tests.

    Example:
    with mock_now(datetime.datetime(2011, 2, 3, 10, 11)):
        assert datetime.datetime.now() == datetime.datetime(2011, 2, 3, 10, 11)
    """

    class MockDateTime(datetime.datetime):
        """Mock datetime.datetime.now() with a fixed datetime."""

        @classmethod
        def now(cls):  # type: ignore
            # Create a copy of dt_value.
            return datetime.datetime(
                dt_value.year,
                dt_value.month,
                dt_value.day,
                dt_value.hour,
                dt_value.minute,
                dt_value.second,
                dt_value.microsecond,
                dt_value.tzinfo,
            )

    real_datetime = datetime.datetime
    datetime.datetime = MockDateTime
    try:
        yield datetime.datetime
    finally:
        datetime.datetime = real_datetime
langchain.utils.mock_now¶ langchain.utils.mock_now(dt_value)[source]¶ Context manager for mocking out datetime.now() in unit tests. Example: with mock_now(datetime.datetime(2011, 2, 3, 10, 11)): assert datetime.datetime.now() == datetime.datetime(2011, 2, 3, 10, 11)
Context manager for mocking out datetime.now() in unit tests.
3d614fed-a424-4d60-8818-067bb7c8c19e
[ "contextlib", "datetime", "importlib", "os", "importlib.metadata.version", "typing.Any", "typing.Callable", "typing.Dict", "typing.List", "typing.Optional", "typing.Tuple", "packaging.version.parse", "requests.HTTPError", "requests.Response" ]
langchain.utils.guard_import
Function
https://api.python.langchain.com/en/latest/utils/langchain.utils.guard_import.html#langchain.utils.guard_import
def guard_import(
    module_name: str, *, pip_name: Optional[str] = None, package: Optional[str] = None
) -> Any:
    """Dynamically imports a module and raises a helpful exception if the module
    is not installed."""
    try:
        module = importlib.import_module(module_name, package)
    except ImportError:
        raise ImportError(
            f"Could not import {module_name} python package. "
            f"Please install it with `pip install {pip_name or module_name}`."
        )
    return module
langchain.utils.guard_import¶ langchain.utils.guard_import(module_name: str, *, pip_name: Optional[str] = None, package: Optional[str] = None) → Any[source]¶ Dynamically imports a module and raises a helpful exception if the module is not installed.
Dynamically imports a module and raises a helpful exception if the module is not installed.
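A sketch of guard_import, assuming faiss is not installed in the environment:

```python
from langchain.utils import guard_import

# An importable module is returned as-is.
json = guard_import("json")

# A missing module raises ImportError naming the pip package to install:
# guard_import("faiss", pip_name="faiss-cpu")
# ImportError: Could not import faiss python package. Please install it
# with `pip install faiss-cpu`.
```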
6320f27b-1af7-49cb-a1bb-8c0486d26685
[ "contextlib", "datetime", "importlib", "os", "importlib.metadata.version", "typing.Any", "typing.Callable", "typing.Dict", "typing.List", "typing.Optional", "typing.Tuple", "packaging.version.parse", "requests.HTTPError", "requests.Response" ]
langchain.utils.check_package_version
Function
https://api.python.langchain.com/en/latest/utils/langchain.utils.check_package_version.html#langchain.utils.check_package_version
def check_package_version(
    package: str,
    lt_version: Optional[str] = None,
    lte_version: Optional[str] = None,
    gt_version: Optional[str] = None,
    gte_version: Optional[str] = None,
) -> None:
    """Check the version of a package."""
    imported_version = parse(version(package))
    if lt_version is not None and imported_version >= parse(lt_version):
        raise ValueError(
            f"Expected {package} version to be < {lt_version}. Received "
            f"{imported_version}."
        )
    if lte_version is not None and imported_version > parse(lte_version):
        raise ValueError(
            f"Expected {package} version to be <= {lte_version}. Received "
            f"{imported_version}."
        )
    if gt_version is not None and imported_version <= parse(gt_version):
        raise ValueError(
            f"Expected {package} version to be > {gt_version}. Received "
            f"{imported_version}."
        )
    if gte_version is not None and imported_version < parse(gte_version):
        raise ValueError(
            f"Expected {package} version to be >= {gte_version}. Received "
            f"{imported_version}."
        )
langchain.utils.check_package_version¶ langchain.utils.check_package_version(package: str, lt_version: Optional[str] = None, lte_version: Optional[str] = None, gt_version: Optional[str] = None, gte_version: Optional[str] = None) → None[source]¶ Check the version of a package.
Check the version of a package.
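A sketch of the version check; the bounds shown are illustrative:

```python
from langchain.utils import check_package_version

# Passes silently when the installed version satisfies every bound given.
check_package_version("requests", gte_version="2.0.0")

# Raises ValueError naming the expected and received versions otherwise:
# check_package_version("requests", lt_version="1.0.0")
```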
d2c1e778-48e2-4dc9-9d58-b9338d8ac985
[ "string.Formatter", "typing.Any", "typing.List", "typing.Mapping", "typing.Sequence", "typing.Union" ]
langchain.formatting.StrictFormatter
Class
https://api.python.langchain.com/en/latest/formatting/langchain.formatting.StrictFormatter.html#langchain.formatting.StrictFormatter
class StrictFormatter(Formatter):
    """A subclass of formatter that checks for extra keys."""

    def check_unused_args(
        self,
        used_args: Sequence[Union[int, str]],
        args: Sequence,
        kwargs: Mapping[str, Any],
    ) -> None:
        """Check to see if extra parameters are passed."""
        extra = set(kwargs).difference(used_args)
        if extra:
            raise KeyError(extra)

    def vformat(
        self, format_string: str, args: Sequence, kwargs: Mapping[str, Any]
    ) -> str:
        """Check that no arguments are provided."""
        if len(args) > 0:
            raise ValueError(
                "No arguments should be provided, "
                "everything should be passed as keyword arguments."
            )
        return super().vformat(format_string, args, kwargs)

    def validate_input_variables(
        self, format_string: str, input_variables: List[str]
    ) -> None:
        dummy_inputs = {input_variable: "foo" for input_variable in input_variables}
        super().format(format_string, **dummy_inputs)
langchain.formatting.StrictFormatter¶ class langchain.formatting.StrictFormatter[source]¶ Bases: Formatter A subclass of formatter that checks for extra keys. Methods __init__() check_unused_args(used_args, args, kwargs) Check to see if extra parameters are passed. convert_field(value, conversion) format(format_string, /, *args, **kwargs) format_field(value, format_spec) get_field(field_name, args, kwargs) get_value(key, args, kwargs) parse(format_string) validate_input_variables(format_string, ...) vformat(format_string, args, kwargs) Check that no arguments are provided. check_unused_args(used_args: Sequence[Union[int, str]], args: Sequence, kwargs: Mapping[str, Any]) → None[source]¶ Check to see if extra parameters are passed. convert_field(value, conversion)¶ format(format_string, /, *args, **kwargs)¶ format_field(value, format_spec)¶ get_field(field_name, args, kwargs)¶ get_value(key, args, kwargs)¶ parse(format_string)¶ validate_input_variables(format_string: str, input_variables: List[str]) → None[source]¶ vformat(format_string: str, args: Sequence, kwargs: Mapping[str, Any]) → str[source]¶ Check that no arguments are provided.
A subclass of formatter that checks for extra keys.
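A sketch of the stricter behavior compared to a plain string.Formatter:

```python
from langchain.formatting import StrictFormatter

formatter = StrictFormatter()
print(formatter.format("Hello {name}", name="world"))  # Hello world

# Unused keyword arguments raise KeyError instead of being silently ignored:
# formatter.format("Hello {name}", name="world", extra=1)  # KeyError: {'extra'}

# Positional arguments are rejected outright:
# formatter.format("Hello {0}", "world")  # ValueError
```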
e86a06b5-063a-48c7-90e1-037b4c7a8dec
[ "typing.Dict", "typing.List", "typing.Optional", "typing.TextIO" ]
langchain.input.get_color_mapping
Function
https://api.python.langchain.com/en/latest/input/langchain.input.get_color_mapping.html#langchain.input.get_color_mapping
def get_color_mapping(
    items: List[str], excluded_colors: Optional[List] = None
) -> Dict[str, str]:
    """Get mapping for items to a supported color."""
    colors = list(_TEXT_COLOR_MAPPING.keys())
    if excluded_colors is not None:
        colors = [c for c in colors if c not in excluded_colors]
    color_mapping = {item: colors[i % len(colors)] for i, item in enumerate(items)}
    return color_mapping
langchain.input.get_color_mapping¶ langchain.input.get_color_mapping(items: List[str], excluded_colors: Optional[List] = None) → Dict[str, str][source]¶ Get mapping for items to a supported color.
Get mapping for items to a supported color.
48b6a8c6-f58f-4398-8855-ad5a2b55e4e9
[ "typing.Dict", "typing.List", "typing.Optional", "typing.TextIO" ]
langchain.input.get_colored_text
Function
https://api.python.langchain.com/en/latest/input/langchain.input.get_colored_text.html#langchain.input.get_colored_text
def get_colored_text(text: str, color: str) -> str:
    """Get colored text."""
    color_str = _TEXT_COLOR_MAPPING[color]
    return f"\u001b[{color_str}m\033[1;3m{text}\u001b[0m"
langchain.input.get_colored_text¶ langchain.input.get_colored_text(text: str, color: str) → str[source]¶ Get colored text.
Get colored text.
5bd6d1c5-712c-461e-a6b2-ee161822f131
[ "typing.Dict", "typing.List", "typing.Optional", "typing.TextIO" ]
langchain.input.get_bolded_text
Function
https://api.python.langchain.com/en/latest/input/langchain.input.get_bolded_text.html#langchain.input.get_bolded_text
def get_bolded_text(text: str) -> str:
    """Get bolded text."""
    return f"\033[1m{text}\033[0m"
langchain.input.get_bolded_text¶ langchain.input.get_bolded_text(text: str) → str[source]¶ Get bolded text.
Get bolded text.
28f93a58-0b83-4c30-af83-bcf12aa4bf64
[ "typing.Dict", "typing.List", "typing.Optional", "typing.TextIO" ]
langchain.input.print_text
Function
https://api.python.langchain.com/en/latest/input/langchain.input.print_text.html#langchain.input.print_text
def print_text(
    text: str, color: Optional[str] = None, end: str = "", file: Optional[TextIO] = None
) -> None:
    """Print text with highlighting and no end characters."""
    text_to_print = get_colored_text(text, color) if color else text
    print(text_to_print, end=end, file=file)
    if file:
        file.flush()  # ensure all printed content is written to the file
langchain.input.print_text¶ langchain.input.print_text(text: str, color: Optional[str] = None, end: str = '', file: Optional[TextIO] = None) → None[source]¶ Print text with highlighting and no end characters.
Print text with highlighting and no end characters.
b2688e46-9faf-4a7b-9659-ae9bca67dc96
[ "contextlib.asynccontextmanager", "typing.Any", "typing.AsyncGenerator", "typing.Dict", "typing.Optional", "aiohttp", "requests", "pydantic.BaseModel", "pydantic.Extra" ]
langchain.requests.Requests
Class
https://api.python.langchain.com/en/latest/requests/langchain.requests.Requests.html#langchain.requests.Requests
class Requests(BaseModel):
    """Wrapper around requests to handle auth and async.

    The main purpose of this wrapper is to handle authentication (by saving
    headers) and enable easy async methods on the same base object.
    """

    headers: Optional[Dict[str, str]] = None
    aiosession: Optional[aiohttp.ClientSession] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    def get(self, url: str, **kwargs: Any) -> requests.Response:
        """GET the URL and return the text."""
        return requests.get(url, headers=self.headers, **kwargs)

    def post(self, url: str, data: Dict[str, Any], **kwargs: Any) -> requests.Response:
        """POST to the URL and return the text."""
        return requests.post(url, json=data, headers=self.headers, **kwargs)

    def patch(self, url: str, data: Dict[str, Any], **kwargs: Any) -> requests.Response:
        """PATCH the URL and return the text."""
        return requests.patch(url, json=data, headers=self.headers, **kwargs)

    def put(self, url: str, data: Dict[str, Any], **kwargs: Any) -> requests.Response:
        """PUT the URL and return the text."""
        return requests.put(url, json=data, headers=self.headers, **kwargs)

    def delete(self, url: str, **kwargs: Any) -> requests.Response:
        """DELETE the URL and return the text."""
        return requests.delete(url, headers=self.headers, **kwargs)

    @asynccontextmanager
    async def _arequest(
        self, method: str, url: str, **kwargs: Any
    ) -> AsyncGenerator[aiohttp.ClientResponse, None]:
        """Make an async request."""
        if not self.aiosession:
            async with aiohttp.ClientSession() as session:
                async with session.request(
                    method, url, headers=self.headers, **kwargs
                ) as response:
                    yield response
        else:
            async with self.aiosession.request(
                method, url, headers=self.headers, **kwargs
            ) as response:
                yield response

    @asynccontextmanager
    async def aget(
        self, url: str, **kwargs: Any
    ) -> AsyncGenerator[aiohttp.ClientResponse, None]:
        """GET the URL and return the text asynchronously."""
        async with self._arequest("GET", url, **kwargs) as response:
            yield response

    @asynccontextmanager
    async def apost(
        self, url: str, data: Dict[str, Any], **kwargs: Any
    ) -> AsyncGenerator[aiohttp.ClientResponse, None]:
        """POST to the URL and return the text asynchronously."""
        async with self._arequest("POST", url, json=data, **kwargs) as response:
            yield response

    @asynccontextmanager
    async def apatch(
        self, url: str, data: Dict[str, Any], **kwargs: Any
    ) -> AsyncGenerator[aiohttp.ClientResponse, None]:
        """PATCH the URL and return the text asynchronously."""
        async with self._arequest("PATCH", url, json=data, **kwargs) as response:
            yield response

    @asynccontextmanager
    async def aput(
        self, url: str, data: Dict[str, Any], **kwargs: Any
    ) -> AsyncGenerator[aiohttp.ClientResponse, None]:
        """PUT the URL and return the text asynchronously."""
        async with self._arequest("PUT", url, json=data, **kwargs) as response:
            yield response

    @asynccontextmanager
    async def adelete(
        self, url: str, **kwargs: Any
    ) -> AsyncGenerator[aiohttp.ClientResponse, None]:
        """DELETE the URL and return the text asynchronously."""
        async with self._arequest("DELETE", url, **kwargs) as response:
            yield response
langchain.requests.Requests¶ class langchain.requests.Requests(*, headers: Optional[Dict[str, str]] = None, aiosession: Optional[ClientSession] = None)[source]¶ Bases: BaseModel Wrapper around requests to handle auth and async. The main purpose of this wrapper is to handle authentication (by saving headers) and enable easy async methods on the same base object. Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param aiosession: Optional[aiohttp.client.ClientSession] = None¶ param headers: Optional[Dict[str, str]] = None¶ adelete(url: str, **kwargs: Any) → AsyncGenerator[ClientResponse, None][source]¶ DELETE the URL and return the text asynchronously. aget(url: str, **kwargs: Any) → AsyncGenerator[ClientResponse, None][source]¶ GET the URL and return the text asynchronously. apatch(url: str, data: Dict[str, Any], **kwargs: Any) → AsyncGenerator[ClientResponse, None][source]¶ PATCH the URL and return the text asynchronously. apost(url: str, data: Dict[str, Any], **kwargs: Any) → AsyncGenerator[ClientResponse, None][source]¶ POST to the URL and return the text asynchronously. aput(url: str, data: Dict[str, Any], **kwargs: Any) → AsyncGenerator[ClientResponse, None][source]¶ PUT the URL and return the text asynchronously. delete(url: str, **kwargs: Any) → Response[source]¶ DELETE the URL and return the text. get(url: str, **kwargs: Any) → Response[source]¶ GET the URL and return the text. patch(url: str, data: Dict[str, Any], **kwargs: Any) → Response[source]¶ PATCH the URL and return the text. post(url: str, data: Dict[str, Any], **kwargs: Any) → Response[source]¶ POST to the URL and return the text. put(url: str, data: Dict[str, Any], **kwargs: Any) → Response[source]¶ PUT the URL and return the text. model Config[source]¶ Bases: object Configuration for this pydantic object. arbitrary_types_allowed = True¶ extra = 'forbid'¶
Wrapper around requests to handle auth and async.
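A sketch of both call styles (the URL and token are placeholders):

```python
from langchain.requests import Requests

req = Requests(headers={"Authorization": "Bearer <token>"})

# Synchronous methods return a requests.Response directly.
resp = req.get("https://httpbin.org/get")
print(resp.status_code)

# Async methods are context managers yielding aiohttp responses:
# async with req.aget("https://httpbin.org/get") as aresp:
#     body = await aresp.text()
```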
80bfc5c4-fd74-43d5-a46d-d1e0ccfe185e
[ "contextlib.asynccontextmanager", "typing.Any", "typing.AsyncGenerator", "typing.Dict", "typing.Optional", "aiohttp", "requests", "pydantic.BaseModel", "pydantic.Extra" ]
langchain.requests.TextRequestsWrapper
Class
https://api.python.langchain.com/en/latest/requests/langchain.requests.TextRequestsWrapper.html#langchain.requests.TextRequestsWrapper
class TextRequestsWrapper(BaseModel):
    """Lightweight wrapper around requests library.

    The main purpose of this wrapper is to always return a text output.
    """

    headers: Optional[Dict[str, str]] = None
    aiosession: Optional[aiohttp.ClientSession] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def requests(self) -> Requests:
        return Requests(headers=self.headers, aiosession=self.aiosession)

    def get(self, url: str, **kwargs: Any) -> str:
        """GET the URL and return the text."""
        return self.requests.get(url, **kwargs).text

    def post(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
        """POST to the URL and return the text."""
        return self.requests.post(url, data, **kwargs).text

    def patch(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
        """PATCH the URL and return the text."""
        return self.requests.patch(url, data, **kwargs).text

    def put(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
        """PUT the URL and return the text."""
        return self.requests.put(url, data, **kwargs).text

    def delete(self, url: str, **kwargs: Any) -> str:
        """DELETE the URL and return the text."""
        return self.requests.delete(url, **kwargs).text

    async def aget(self, url: str, **kwargs: Any) -> str:
        """GET the URL and return the text asynchronously."""
        async with self.requests.aget(url, **kwargs) as response:
            return await response.text()

    async def apost(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
        """POST to the URL and return the text asynchronously."""
        async with self.requests.apost(url, data, **kwargs) as response:
            return await response.text()

    async def apatch(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
        """PATCH the URL and return the text asynchronously."""
        async with self.requests.apatch(url, data, **kwargs) as response:
            return await response.text()

    async def aput(self, url: str, data: Dict[str, Any], **kwargs: Any) -> str:
        """PUT the URL and return the text asynchronously."""
        async with self.requests.aput(url, data, **kwargs) as response:
            return await response.text()

    async def adelete(self, url: str, **kwargs: Any) -> str:
        """DELETE the URL and return the text asynchronously."""
        async with self.requests.adelete(url, **kwargs) as response:
            return await response.text()
langchain.requests.TextRequestsWrapper¶ class langchain.requests.TextRequestsWrapper(*, headers: Optional[Dict[str, str]] = None, aiosession: Optional[ClientSession] = None)[source]¶ Bases: BaseModel Lightweight wrapper around requests library. The main purpose of this wrapper is to always return a text output. Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param aiosession: Optional[aiohttp.client.ClientSession] = None¶ param headers: Optional[Dict[str, str]] = None¶ async adelete(url: str, **kwargs: Any) → str[source]¶ DELETE the URL and return the text asynchronously. async aget(url: str, **kwargs: Any) → str[source]¶ GET the URL and return the text asynchronously. async apatch(url: str, data: Dict[str, Any], **kwargs: Any) → str[source]¶ PATCH the URL and return the text asynchronously. async apost(url: str, data: Dict[str, Any], **kwargs: Any) → str[source]¶ POST to the URL and return the text asynchronously. async aput(url: str, data: Dict[str, Any], **kwargs: Any) → str[source]¶ PUT the URL and return the text asynchronously. delete(url: str, **kwargs: Any) → str[source]¶ DELETE the URL and return the text. get(url: str, **kwargs: Any) → str[source]¶ GET the URL and return the text. patch(url: str, data: Dict[str, Any], **kwargs: Any) → str[source]¶ PATCH the URL and return the text. post(url: str, data: Dict[str, Any], **kwargs: Any) → str[source]¶ POST to the URL and return the text. put(url: str, data: Dict[str, Any], **kwargs: Any) → str[source]¶ PUT the URL and return the text. property requests: langchain.requests.Requests¶ model Config[source]¶ Bases: object Configuration for this pydantic object. arbitrary_types_allowed = True¶ extra = 'forbid'¶
Lightweight wrapper around requests library.
69773811-acef-4f1d-b265-38bcc0498209
[ "__future__.annotations", "re", "typing.Dict", "typing.Optional", "langchain.schema.BaseOutputParser" ]
langchain.output_parsers.regex_dict.RegexDictParser
Class
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.regex_dict.RegexDictParser.html#langchain.output_parsers.regex_dict.RegexDictParser
class RegexDictParser(BaseOutputParser):
    """Class to parse the output into a dictionary."""

    regex_pattern: str = r"{}:\s?([^.'\n']*)\.?"  # : :meta private:
    output_key_to_format: Dict[str, str]
    no_update_value: Optional[str] = None

    @property
    def _type(self) -> str:
        """Return the type key."""
        return "regex_dict_parser"

    def parse(self, text: str) -> Dict[str, str]:
        """Parse the output of an LLM call."""
        result = {}
        for output_key, expected_format in self.output_key_to_format.items():
            specific_regex = self.regex_pattern.format(re.escape(expected_format))
            matches = re.findall(specific_regex, text)
            if not matches:
                raise ValueError(
                    f"No match found for output key: {output_key} with "
                    f"expected format {expected_format} on text {text}"
                )
            elif len(matches) > 1:
                raise ValueError(
                    f"Multiple matches found for output key: {output_key} with "
                    f"expected format {expected_format} on text {text}"
                )
            elif (
                self.no_update_value is not None and matches[0] == self.no_update_value
            ):
                continue
            else:
                result[output_key] = matches[0]
        return result
langchain.output_parsers.regex_dict.RegexDictParser¶ class langchain.output_parsers.regex_dict.RegexDictParser(*, regex_pattern: str = "{}:\\s?([^.'\\n']*)\\.?", output_key_to_format: Dict[str, str], no_update_value: Optional[str] = None)[source]¶ Bases: BaseOutputParser Class to parse the output into a dictionary. Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param no_update_value: Optional[str] = None¶ param output_key_to_format: Dict[str, str] [Required]¶ param regex_pattern: str = "{}:\\s?([^.'\\n']*)\\.?"¶ dict(**kwargs: Any) → Dict¶ Return dictionary representation of output parser. get_format_instructions() → str¶ Instructions on how the LLM output should be formatted. parse(text: str) → Dict[str, str][source]¶ Parse the output of an LLM call. parse_result(result: List[Generation]) → T¶ Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, whichis assumed to be the highest-likelihood Generation. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. parse_with_prompt(completion: str, prompt: PromptValue) → Any¶ Parse the output of an LLM call with the input prompt for context. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. Parameters completion – String output of language model. prompt – Input PromptValue. Returns Structured output to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Class to parse the output into a dictionary.
e32f0ab5-b9fe-47af-830c-0f6578fb5f79
[ "langchain.output_parsers.regex.RegexParser" ]
langchain.output_parsers.loading.load_output_parser
Function
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.loading.load_output_parser.html#langchain.output_parsers.loading.load_output_parser
def load_output_parser(config: dict) -> dict:
    """Load output parser."""
    if "output_parsers" in config:
        if config["output_parsers"] is not None:
            _config = config["output_parsers"]
            output_parser_type = _config["_type"]
            if output_parser_type == "regex_parser":
                output_parser = RegexParser(**_config)
            else:
                raise ValueError(f"Unsupported output parser {output_parser_type}")
            config["output_parsers"] = output_parser
    return config
langchain.output_parsers.loading.load_output_parser¶ langchain.output_parsers.loading.load_output_parser(config: dict) → dict[source]¶ Load output parser.
Load output parser.
a20b680f-0a74-452f-a028-5d6f8b56aaf1
[ "json", "re", "typing.Type", "typing.TypeVar", "pydantic.BaseModel", "pydantic.ValidationError", "langchain.output_parsers.format_instructions.PYDANTIC_FORMAT_INSTRUCTIONS", "langchain.schema.BaseOutputParser", "langchain.schema.OutputParserException" ]
langchain.output_parsers.pydantic.PydanticOutputParser
Class
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.pydantic.PydanticOutputParser.html#langchain.output_parsers.pydantic.PydanticOutputParser
class PydanticOutputParser(BaseOutputParser[T]):
    pydantic_object: Type[T]

    def parse(self, text: str) -> T:
        try:
            # Greedy search for 1st json candidate.
            match = re.search(
                r"\{.*\}", text.strip(), re.MULTILINE | re.IGNORECASE | re.DOTALL
            )
            json_str = ""
            if match:
                json_str = match.group()
            json_object = json.loads(json_str, strict=False)
            return self.pydantic_object.parse_obj(json_object)
        except (json.JSONDecodeError, ValidationError) as e:
            name = self.pydantic_object.__name__
            msg = f"Failed to parse {name} from completion {text}. Got: {e}"
            raise OutputParserException(msg, llm_output=text)

    def get_format_instructions(self) -> str:
        schema = self.pydantic_object.schema()

        # Remove extraneous fields.
        reduced_schema = schema
        if "title" in reduced_schema:
            del reduced_schema["title"]
        if "type" in reduced_schema:
            del reduced_schema["type"]
        # Ensure json in context is well-formed with double quotes.
        schema_str = json.dumps(reduced_schema)

        return PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str)

    @property
    def _type(self) -> str:
        return "pydantic"
langchain.output_parsers.pydantic.PydanticOutputParser¶ class langchain.output_parsers.pydantic.PydanticOutputParser(*, pydantic_object: Type[T])[source]¶ Bases: BaseOutputParser[T] Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param pydantic_object: Type[langchain.output_parsers.pydantic.T] [Required]¶ dict(**kwargs: Any) → Dict¶ Return dictionary representation of output parser. get_format_instructions() → str[source]¶ Instructions on how the LLM output should be formatted. parse(text: str) → T[source]¶ Parse a single string model output into some structure. Parameters text – String output of language model. Returns Structured output. parse_result(result: List[Generation]) → T¶ Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, whichis assumed to be the highest-likelihood Generation. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. parse_with_prompt(completion: str, prompt: PromptValue) → Any¶ Parse the output of an LLM call with the input prompt for context. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. Parameters completion – String output of language model. prompt – Input PromptValue. Returns Structured output to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Create a new model by parsing and validating input data from keyword arguments.
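A sketch of the pydantic parser on a toy schema (the Joke model is hypothetical):

```python
from pydantic import BaseModel

from langchain.output_parsers.pydantic import PydanticOutputParser

class Joke(BaseModel):  # hypothetical schema for illustration
    setup: str
    punchline: str

parser = PydanticOutputParser(pydantic_object=Joke)

# The parser greedily extracts the first {...} span from the completion,
# so surrounding chatter is tolerated.
text = 'Sure! {"setup": "Why?", "punchline": "Because."}'
print(parser.parse(text).punchline)  # Because.
```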
6bc79171-b158-44ac-8f9d-67bef6b212d0
[ "__future__.annotations", "typing.Any", "typing.Dict", "typing.List", "pydantic.root_validator", "langchain.schema.BaseOutputParser" ]
langchain.output_parsers.combining.CombiningOutputParser
Class
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.combining.CombiningOutputParser.html#langchain.output_parsers.combining.CombiningOutputParser
class CombiningOutputParser(BaseOutputParser):
    """Class to combine multiple output parsers into one."""

    @property
    def lc_serializable(self) -> bool:
        return True

    parsers: List[BaseOutputParser]

    @root_validator()
    def validate_parsers(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Validate the parsers."""
        parsers = values["parsers"]
        if len(parsers) < 2:
            raise ValueError("Must have at least two parsers")
        for parser in parsers:
            if parser._type == "combining":
                raise ValueError("Cannot nest combining parsers")
            if parser._type == "list":
                raise ValueError("Cannot combine list parsers")
        return values

    @property
    def _type(self) -> str:
        """Return the type key."""
        return "combining"

    def get_format_instructions(self) -> str:
        """Instructions on how the LLM output should be formatted."""
        initial = f"For your first output: {self.parsers[0].get_format_instructions()}"
        subsequent = "\n".join(
            f"Complete that output fully. Then produce another output, separated by two newline characters: {p.get_format_instructions()}"  # noqa: E501
            for p in self.parsers[1:]
        )
        return f"{initial}\n{subsequent}"

    def parse(self, text: str) -> Dict[str, Any]:
        """Parse the output of an LLM call."""
        texts = text.split("\n\n")
        output = dict()
        for txt, parser in zip(texts, self.parsers):
            output.update(parser.parse(txt.strip()))
        return output
langchain.output_parsers.combining.CombiningOutputParser¶ class langchain.output_parsers.combining.CombiningOutputParser(*, parsers: List[BaseOutputParser])[source]¶ Bases: BaseOutputParser Class to combine multiple output parsers into one. Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param parsers: List[langchain.schema.output_parser.BaseOutputParser] [Required]¶ dict(**kwargs: Any) → Dict¶ Return dictionary representation of output parser. get_format_instructions() → str[source]¶ Instructions on how the LLM output should be formatted. parse(text: str) → Dict[str, Any][source]¶ Parse the output of an LLM call. parse_result(result: List[Generation]) → T¶ Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, whichis assumed to be the highest-likelihood Generation. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. parse_with_prompt(completion: str, prompt: PromptValue) → Any¶ Parse the output of an LLM call with the input prompt for context. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. Parameters completion – String output of language model. prompt – Input PromptValue. Returns Structured output to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ validator validate_parsers  »  all fields[source]¶ Validate the parsers. property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Class to combine multiple output parsers into one.
4fa8096b-b87b-4e5f-9629-cb5839888c17
[ "__future__.annotations", "typing.Any", "typing.Callable", "typing.Dict", "typing.Optional", "langchain.schema.BaseOutputParser" ]
langchain.output_parsers.rail_parser.GuardrailsOutputParser
Class
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.rail_parser.GuardrailsOutputParser.html#langchain.output_parsers.rail_parser.GuardrailsOutputParser
class GuardrailsOutputParser(BaseOutputParser):
    guard: Any
    api: Optional[Callable]
    args: Any
    kwargs: Any

    @property
    def _type(self) -> str:
        return "guardrails"

    @classmethod
    def from_rail(
        cls,
        rail_file: str,
        num_reasks: int = 1,
        api: Optional[Callable] = None,
        *args: Any,
        **kwargs: Any,
    ) -> GuardrailsOutputParser:
        try:
            from guardrails import Guard
        except ImportError:
            raise ValueError(
                "guardrails-ai package not installed. "
                "Install it by running `pip install guardrails-ai`."
            )
        return cls(
            guard=Guard.from_rail(rail_file, num_reasks=num_reasks),
            api=api,
            args=args,
            kwargs=kwargs,
        )

    @classmethod
    def from_rail_string(
        cls,
        rail_str: str,
        num_reasks: int = 1,
        api: Optional[Callable] = None,
        *args: Any,
        **kwargs: Any,
    ) -> GuardrailsOutputParser:
        try:
            from guardrails import Guard
        except ImportError:
            raise ValueError(
                "guardrails-ai package not installed. "
                "Install it by running `pip install guardrails-ai`."
            )
        return cls(
            guard=Guard.from_rail_string(rail_str, num_reasks=num_reasks),
            api=api,
            args=args,
            kwargs=kwargs,
        )

    @classmethod
    def from_pydantic(
        cls,
        output_class: Any,
        num_reasks: int = 1,
        api: Optional[Callable] = None,
        *args: Any,
        **kwargs: Any,
    ) -> GuardrailsOutputParser:
        try:
            from guardrails import Guard
        except ImportError:
            raise ValueError(
                "guardrails-ai package not installed. "
                "Install it by running `pip install guardrails-ai`."
            )
        return cls(
            guard=Guard.from_pydantic(output_class, "", num_reasks=num_reasks),
            api=api,
            args=args,
            kwargs=kwargs,
        )

    def get_format_instructions(self) -> str:
        return self.guard.raw_prompt.format_instructions

    def parse(self, text: str) -> Dict:
        return self.guard.parse(text, llm_api=self.api, *self.args, **self.kwargs)
langchain.output_parsers.rail_parser.GuardrailsOutputParser¶ class langchain.output_parsers.rail_parser.GuardrailsOutputParser(*, guard: Any = None, api: Optional[Callable] = None, args: Any = None, kwargs: Any = None)[source]¶ Bases: BaseOutputParser Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param api: Optional[Callable] = None¶ param args: Any = None¶ param guard: Any = None¶ param kwargs: Any = None¶ dict(**kwargs: Any) → Dict¶ Return dictionary representation of output parser. classmethod from_pydantic(output_class: Any, num_reasks: int = 1, api: Optional[Callable] = None, *args: Any, **kwargs: Any) → GuardrailsOutputParser[source]¶ classmethod from_rail(rail_file: str, num_reasks: int = 1, api: Optional[Callable] = None, *args: Any, **kwargs: Any) → GuardrailsOutputParser[source]¶ classmethod from_rail_string(rail_str: str, num_reasks: int = 1, api: Optional[Callable] = None, *args: Any, **kwargs: Any) → GuardrailsOutputParser[source]¶ get_format_instructions() → str[source]¶ Instructions on how the LLM output should be formatted. parse(text: str) → Dict[source]¶ Parse a single string model output into some structure. Parameters text – String output of language model. Returns Structured output. parse_result(result: List[Generation]) → T¶ Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, whichis assumed to be the highest-likelihood Generation. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. parse_with_prompt(completion: str, prompt: PromptValue) → Any¶ Parse the output of an LLM call with the input prompt for context. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. Parameters completion – String output of language model. prompt – Input PromptValue. Returns Structured output to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Create a new model by parsing and validating input data from keyword arguments.
ce8e4290-7242-43b5-b0c9-60aa0dd98fc2
[ "random", "datetime.datetime", "datetime.timedelta", "typing.List", "langchain.schema.BaseOutputParser", "langchain.schema.OutputParserException", "langchain.utils.comma_list" ]
langchain.output_parsers.datetime.DatetimeOutputParser
Class
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.datetime.DatetimeOutputParser.html#langchain.output_parsers.datetime.DatetimeOutputParser
class DatetimeOutputParser(BaseOutputParser[datetime]):
    format: str = "%Y-%m-%dT%H:%M:%S.%fZ"

    def get_format_instructions(self) -> str:
        examples = comma_list(_generate_random_datetime_strings(self.format))
        return f"""Write a datetime string that matches the following pattern: "{self.format}". Examples: {examples}"""

    def parse(self, response: str) -> datetime:
        try:
            return datetime.strptime(response.strip(), self.format)
        except ValueError as e:
            raise OutputParserException(
                f"Could not parse datetime string: {response}"
            ) from e

    @property
    def _type(self) -> str:
        return "datetime"
langchain.output_parsers.datetime.DatetimeOutputParser¶ class langchain.output_parsers.datetime.DatetimeOutputParser(*, format: str = '%Y-%m-%dT%H:%M:%S.%fZ')[source]¶ Bases: BaseOutputParser[datetime] Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param format: str = '%Y-%m-%dT%H:%M:%S.%fZ'¶ dict(**kwargs: Any) → Dict¶ Return dictionary representation of output parser. get_format_instructions() → str[source]¶ Instructions on how the LLM output should be formatted. parse(response: str) → datetime[source]¶ Parse a single string model output into some structure. Parameters text – String output of language model. Returns Structured output. parse_result(result: List[Generation]) → T¶ Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, whichis assumed to be the highest-likelihood Generation. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. parse_with_prompt(completion: str, prompt: PromptValue) → Any¶ Parse the output of an LLM call with the input prompt for context. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. Parameters completion – String output of language model. prompt – Input PromptValue. Returns Structured output to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Create a new model by parsing and validating input data from keyword arguments.
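A sketch of the datetime parser with a simplified format string:

```python
from langchain.output_parsers.datetime import DatetimeOutputParser

parser = DatetimeOutputParser(format="%Y-%m-%d")
print(parser.parse("2023-07-04"))  # datetime.datetime(2023, 7, 4, 0, 0)

# Strings that do not match the pattern raise OutputParserException:
# parser.parse("July 4th")
```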
19402094-bb17-491e-aefc-d92648beca4b
[ "__future__.annotations", "typing.Any", "typing.List", "pydantic.BaseModel", "langchain.output_parsers.format_instructions.STRUCTURED_FORMAT_INSTRUCTIONS", "langchain.output_parsers.format_instructions.STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS", "langchain.output_parsers.json.parse_and_check_json_markdown", "langchain.schema.BaseOutputParser" ]
langchain.output_parsers.structured.ResponseSchema
Class
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.structured.ResponseSchema.html#langchain.output_parsers.structured.ResponseSchema
class ResponseSchema(BaseModel):
    name: str
    description: str
    type: str = "string"
langchain.output_parsers.structured.ResponseSchema¶ class langchain.output_parsers.structured.ResponseSchema(*, name: str, description: str, type: str = 'string')[source]¶ Bases: BaseModel Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param description: str [Required]¶ param name: str [Required]¶ param type: str = 'string'¶
Create a new model by parsing and validating input data from keyword arguments.
c93dff27-4dae-42a0-be12-911a6def217b
[ "__future__.annotations", "typing.Any", "typing.List", "pydantic.BaseModel", "langchain.output_parsers.format_instructions.STRUCTURED_FORMAT_INSTRUCTIONS", "langchain.output_parsers.format_instructions.STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS", "langchain.output_parsers.json.parse_and_check_json_markdown", "langchain.schema.BaseOutputParser" ]
langchain.output_parsers.structured.StructuredOutputParser
Class
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.structured.StructuredOutputParser.html#langchain.output_parsers.structured.StructuredOutputParser
class StructuredOutputParser(BaseOutputParser):
    response_schemas: List[ResponseSchema]

    @classmethod
    def from_response_schemas(
        cls, response_schemas: List[ResponseSchema]
    ) -> StructuredOutputParser:
        return cls(response_schemas=response_schemas)

    def get_format_instructions(self, only_json: bool = False) -> str:
        """
        Method to get the format instructions for the output parser.

        example:
        ```python
        from langchain.output_parsers.structured import (
            StructuredOutputParser, ResponseSchema
        )

        response_schemas = [
            ResponseSchema(
                name="foo", description="a list of strings", type="List[string]"
            ),
            ResponseSchema(name="bar", description="a string", type="string"),
        ]

        parser = StructuredOutputParser.from_response_schemas(response_schemas)

        print(parser.get_format_instructions())

        output:
        # The output should be a markdown code snippet formatted in the following
        # schema, including the leading and trailing "```json" and "```":
        #
        # ```json
        # {
        #     "foo": List[string]  // a list of strings
        #     "bar": string  // a string
        # }

        Args:
            only_json (bool): If True, only the json in the markdown code snippet
                will be returned, without the introducing text. Defaults to False.
        """
        schema_str = "\n".join(
            [_get_sub_string(schema) for schema in self.response_schemas]
        )
        if only_json:
            return STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS.format(format=schema_str)
        else:
            return STRUCTURED_FORMAT_INSTRUCTIONS.format(format=schema_str)

    def parse(self, text: str) -> Any:
        expected_keys = [rs.name for rs in self.response_schemas]
        return parse_and_check_json_markdown(text, expected_keys)

    @property
    def _type(self) -> str:
        return "structured"
langchain.output_parsers.structured.StructuredOutputParser¶ class langchain.output_parsers.structured.StructuredOutputParser(*, response_schemas: List[ResponseSchema])[source]¶ Bases: BaseOutputParser Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param response_schemas: List[langchain.output_parsers.structured.ResponseSchema] [Required]¶ dict(**kwargs: Any) → Dict¶ Return dictionary representation of output parser. classmethod from_response_schemas(response_schemas: List[ResponseSchema]) → StructuredOutputParser[source]¶ get_format_instructions(only_json: bool = False) → str[source]¶ Method to get the format instructions for the output parser. example: ```python from langchain.output_parsers.structured import ( StructuredOutputParser, ResponseSchema ) response_schemas = [ ResponseSchema(name=”foo”, description=”a list of strings”, type=”List[string]” ), ResponseSchema(name=”bar”, description=”a string”, type=”string” ), ] parser = StructuredOutputParser.from_response_schemas(response_schemas) print(parser.get_format_instructions()) output: # The output should be a markdown code snippet formatted in the following # schema, including the leading and trailing “`json" and "`”: # # ```json # { # “foo”: List[string] // a list of strings # “bar”: string // a string # } Parameters only_json (bool) – If True, only the json in the markdown code snippet will be returned, without the introducing text. Defaults to False. parse(text: str) → Any[source]¶ Parse a single string model output into some structure. Parameters text – String output of language model. Returns Structured output. parse_result(result: List[Generation]) → T¶ Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, whichis assumed to be the highest-likelihood Generation. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. parse_with_prompt(completion: str, prompt: PromptValue) → Any¶ Parse the output of an LLM call with the input prompt for context. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. Parameters completion – String output of language model. prompt – Input PromptValue. Returns Structured output to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Create a new model by parsing and validating input data from keyword arguments.
e0d6ec2d-61a9-40a7-a86f-f6967692592e
[ "__future__.annotations", "typing.TypeVar", "langchain.chains.llm.LLMChain", "langchain.output_parsers.prompts.NAIVE_FIX_PROMPT", "langchain.schema.BaseOutputParser", "langchain.schema.BasePromptTemplate", "langchain.schema.OutputParserException", "langchain.schema.language_model.BaseLanguageModel" ]
langchain.output_parsers.fix.OutputFixingParser
Class
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.fix.OutputFixingParser.html#langchain.output_parsers.fix.OutputFixingParser
class OutputFixingParser(BaseOutputParser[T]):
    """Wraps a parser and tries to fix parsing errors."""

    @property
    def lc_serializable(self) -> bool:
        return True

    parser: BaseOutputParser[T]
    retry_chain: LLMChain

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        parser: BaseOutputParser[T],
        prompt: BasePromptTemplate = NAIVE_FIX_PROMPT,
    ) -> OutputFixingParser[T]:
        chain = LLMChain(llm=llm, prompt=prompt)
        return cls(parser=parser, retry_chain=chain)

    def parse(self, completion: str) -> T:
        try:
            parsed_completion = self.parser.parse(completion)
        except OutputParserException as e:
            new_completion = self.retry_chain.run(
                instructions=self.parser.get_format_instructions(),
                completion=completion,
                error=repr(e),
            )
            parsed_completion = self.parser.parse(new_completion)

        return parsed_completion

    def get_format_instructions(self) -> str:
        return self.parser.get_format_instructions()

    @property
    def _type(self) -> str:
        return "output_fixing"
langchain.output_parsers.fix.OutputFixingParser¶ class langchain.output_parsers.fix.OutputFixingParser(*, parser: BaseOutputParser[T], retry_chain: LLMChain)[source]¶ Bases: BaseOutputParser[T] Wraps a parser and tries to fix parsing errors. Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param parser: langchain.schema.output_parser.BaseOutputParser[langchain.output_parsers.fix.T] [Required]¶ param retry_chain: langchain.chains.llm.LLMChain [Required]¶ dict(**kwargs: Any) → Dict¶ Return dictionary representation of output parser. classmethod from_llm(llm: BaseLanguageModel, parser: BaseOutputParser[T], prompt: BasePromptTemplate = PromptTemplate(input_variables=['completion', 'error', 'instructions'], output_parser=None, partial_variables={}, template='Instructions:\n--------------\n{instructions}\n--------------\nCompletion:\n--------------\n{completion}\n--------------\n\nAbove, the Completion did not satisfy the constraints given in the Instructions.\nError:\n--------------\n{error}\n--------------\n\nPlease try again. Please only respond with an answer that satisfies the constraints laid out in the Instructions:', template_format='f-string', validate_template=True)) → OutputFixingParser[T][source]¶ get_format_instructions() → str[source]¶ Instructions on how the LLM output should be formatted. parse(completion: str) → T[source]¶ Parse a single string model output into some structure. Parameters text – String output of language model. Returns Structured output. parse_result(result: List[Generation]) → T¶ Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, whichis assumed to be the highest-likelihood Generation. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. parse_with_prompt(completion: str, prompt: PromptValue) → Any¶ Parse the output of an LLM call with the input prompt for context. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. Parameters completion – String output of language model. prompt – Input PromptValue. Returns Structured output to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Wraps a parser and tries to fix parsing errors.
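Usage sketch (not part of the entry above; assumes an OpenAI API key is available and uses PydanticOutputParser as the wrapped parser, since its parse raises the OutputParserException that triggers the fixing chain):

from typing import List

from pydantic import BaseModel

from langchain.chat_models import ChatOpenAI  # assumes OPENAI_API_KEY is set
from langchain.output_parsers import OutputFixingParser, PydanticOutputParser


class Actor(BaseModel):
    name: str
    films: List[str]


base_parser = PydanticOutputParser(pydantic_object=Actor)
fixing_parser = OutputFixingParser.from_llm(llm=ChatOpenAI(temperature=0), parser=base_parser)

# Single quotes make this invalid JSON, so base_parser.parse raises
# OutputParserException; the fixing parser then runs the retry chain
# (format instructions + bad completion + error) and re-parses the result.
actor = fixing_parser.parse("{'name': 'Tom Hanks', 'films': ['Big']}")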
d4c94855-5dd9-450f-8808-d533a3154deb
[ "json", "typing.Any", "typing.Dict", "typing.List", "typing.Type", "typing.Union", "pydantic.BaseModel", "pydantic.root_validator", "langchain.schema.BaseLLMOutputParser", "langchain.schema.ChatGeneration", "langchain.schema.Generation", "langchain.schema.OutputParserException" ]
langchain.output_parsers.openai_functions.OutputFunctionsParser
Class
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.openai_functions.OutputFunctionsParser.html#langchain.output_parsers.openai_functions.OutputFunctionsParser
class OutputFunctionsParser(BaseLLMOutputParser[Any]): args_only: bool = True def parse_result(self, result: List[Generation]) -> Any: generation = result[0] if not isinstance(generation, ChatGeneration): raise OutputParserException( "This output parser can only be used with a chat generation." ) message = generation.message try: func_call = message.additional_kwargs["function_call"] except KeyError as exc: # Dict access raises KeyError (not ValueError) when the model # returned no function call, so catch that and re-raise. raise OutputParserException(f"Could not parse function call: {exc}") if self.args_only: return func_call["arguments"] return func_call
langchain.output_parsers.openai_functions.OutputFunctionsParser¶ class langchain.output_parsers.openai_functions.OutputFunctionsParser(*, args_only: bool = True)[source]¶ Bases: BaseLLMOutputParser[Any] Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param args_only: bool = True¶ parse_result(result: List[Generation]) → Any[source]¶ Parse a list of candidate model Generations into a specific format. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Create a new model by parsing and validating input data from keyword arguments.
61537331-3487-43af-925c-173f1138c852
[ "json", "typing.Any", "typing.Dict", "typing.List", "typing.Type", "typing.Union", "pydantic.BaseModel", "pydantic.root_validator", "langchain.schema.BaseLLMOutputParser", "langchain.schema.ChatGeneration", "langchain.schema.Generation", "langchain.schema.OutputParserException" ]
langchain.output_parsers.openai_functions.JsonOutputFunctionsParser
Class
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.openai_functions.JsonOutputFunctionsParser.html#langchain.output_parsers.openai_functions.JsonOutputFunctionsParser
class JsonOutputFunctionsParser(OutputFunctionsParser): def parse_result(self, result: List[Generation]) -> Any: func = super().parse_result(result) if self.args_only: return json.loads(func) func["arguments"] = json.loads(func["arguments"]) return func
langchain.output_parsers.openai_functions.JsonOutputFunctionsParser¶ class langchain.output_parsers.openai_functions.JsonOutputFunctionsParser(*, args_only: bool = True)[source]¶ Bases: OutputFunctionsParser Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param args_only: bool = True¶ parse_result(result: List[Generation]) → Any[source]¶ Parse a list of candidate model Generations into a specific format. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Create a new model by parsing and validating input data from keyword arguments.
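Self-contained sketch of the parsing step, using a hand-built ChatGeneration in place of a real OpenAI function-calling response (get_weather and its arguments are illustrative, not from the source):

from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
from langchain.schema import AIMessage, ChatGeneration

# Mimic a chat model response that carries an OpenAI function call.
message = AIMessage(
    content="",
    additional_kwargs={
        "function_call": {"name": "get_weather", "arguments": '{"city": "Boise"}'}
    },
)
parser = JsonOutputFunctionsParser()  # args_only=True: return just the decoded arguments
print(parser.parse_result([ChatGeneration(message=message)]))  # {'city': 'Boise'}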
0351f189-2f71-4480-bdc6-997e475f3738
[ "json", "typing.Any", "typing.Dict", "typing.List", "typing.Type", "typing.Union", "pydantic.BaseModel", "pydantic.root_validator", "langchain.schema.BaseLLMOutputParser", "langchain.schema.ChatGeneration", "langchain.schema.Generation", "langchain.schema.OutputParserException" ]
langchain.output_parsers.openai_functions.JsonKeyOutputFunctionsParser
Class
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.openai_functions.JsonKeyOutputFunctionsParser.html#langchain.output_parsers.openai_functions.JsonKeyOutputFunctionsParser
class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser): key_name: str def parse_result(self, result: List[Generation]) -> Any: res = super().parse_result(result) return res[self.key_name]
langchain.output_parsers.openai_functions.JsonKeyOutputFunctionsParser¶ class langchain.output_parsers.openai_functions.JsonKeyOutputFunctionsParser(*, args_only: bool = True, key_name: str)[source]¶ Bases: JsonOutputFunctionsParser Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param args_only: bool = True¶ param key_name: str [Required]¶ parse_result(result: List[Generation]) → Any[source]¶ Parse a list of candidate model Generations into a specific format. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Create a new model by parsing and validating input data from keyword arguments.
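The same setup with a key_name pulls a single field out of the decoded arguments (again a hand-built generation; get_weather and city are illustrative):

from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParser
from langchain.schema import AIMessage, ChatGeneration

message = AIMessage(
    content="",
    additional_kwargs={
        "function_call": {"name": "get_weather", "arguments": '{"city": "Boise"}'}
    },
)
parser = JsonKeyOutputFunctionsParser(key_name="city")
print(parser.parse_result([ChatGeneration(message=message)]))  # 'Boise'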
b05cb18a-6451-448f-846d-8456e5b887b2
[ "json", "typing.Any", "typing.Dict", "typing.List", "typing.Type", "typing.Union", "pydantic.BaseModel", "pydantic.root_validator", "langchain.schema.BaseLLMOutputParser", "langchain.schema.ChatGeneration", "langchain.schema.Generation", "langchain.schema.OutputParserException" ]
langchain.output_parsers.openai_functions.PydanticOutputFunctionsParser
Class
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.openai_functions.PydanticOutputFunctionsParser.html#langchain.output_parsers.openai_functions.PydanticOutputFunctionsParser
class PydanticOutputFunctionsParser(OutputFunctionsParser): pydantic_schema: Union[Type[BaseModel], Dict[str, Type[BaseModel]]] @root_validator(pre=True) def validate_schema(cls, values: Dict) -> Dict: schema = values["pydantic_schema"] if "args_only" not in values: values["args_only"] = isinstance(schema, type) and issubclass( schema, BaseModel ) elif values["args_only"] and isinstance(schema, Dict): raise ValueError( "If multiple pydantic schemas are provided then args_only should be" " False." ) return values def parse_result(self, result: List[Generation]) -> Any: _result = super().parse_result(result) if self.args_only: pydantic_args = self.pydantic_schema.parse_raw(_result) # type: ignore else: fn_name = _result["name"] _args = _result["arguments"] pydantic_args = self.pydantic_schema[fn_name].parse_raw(_args) # type: ignore # noqa: E501 return pydantic_args
langchain.output_parsers.openai_functions.PydanticOutputFunctionsParser¶ class langchain.output_parsers.openai_functions.PydanticOutputFunctionsParser(*, args_only: bool = True, pydantic_schema: Union[Type[BaseModel], Dict[str, Type[BaseModel]]])[source]¶ Bases: OutputFunctionsParser Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param args_only: bool = True¶ param pydantic_schema: Union[Type[pydantic.main.BaseModel], Dict[str, Type[pydantic.main.BaseModel]]] [Required]¶ parse_result(result: List[Generation]) → Any[source]¶ Parse a list of candidate model Generations into a specific format. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ validator validate_schema  »  all fields[source]¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Create a new model by parsing and validating input data from keyword arguments.
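A sketch of the single-schema path (the Weather model and arguments are illustrative; with one schema the validator above sets args_only=True automatically):

from pydantic import BaseModel

from langchain.output_parsers.openai_functions import PydanticOutputFunctionsParser
from langchain.schema import AIMessage, ChatGeneration


class Weather(BaseModel):
    city: str
    temp_f: float


parser = PydanticOutputFunctionsParser(pydantic_schema=Weather)
message = AIMessage(
    content="",
    additional_kwargs={
        "function_call": {
            "name": "Weather",
            "arguments": '{"city": "Boise", "temp_f": 72.5}',
        }
    },
)
print(parser.parse_result([ChatGeneration(message=message)]))
# Weather(city='Boise', temp_f=72.5)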
1a447c18-4935-449a-88ca-90b429353509
[ "json", "typing.Any", "typing.Dict", "typing.List", "typing.Type", "typing.Union", "pydantic.BaseModel", "pydantic.root_validator", "langchain.schema.BaseLLMOutputParser", "langchain.schema.ChatGeneration", "langchain.schema.Generation", "langchain.schema.OutputParserException" ]
langchain.output_parsers.openai_functions.PydanticAttrOutputFunctionsParser
Class
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.openai_functions.PydanticAttrOutputFunctionsParser.html#langchain.output_parsers.openai_functions.PydanticAttrOutputFunctionsParser
class PydanticAttrOutputFunctionsParser(PydanticOutputFunctionsParser): attr_name: str def parse_result(self, result: List[Generation]) -> Any: result = super().parse_result(result) return getattr(result, self.attr_name)
langchain.output_parsers.openai_functions.PydanticAttrOutputFunctionsParser¶ class langchain.output_parsers.openai_functions.PydanticAttrOutputFunctionsParser(*, args_only: bool = True, pydantic_schema: Union[Type[BaseModel], Dict[str, Type[BaseModel]]], attr_name: str)[source]¶ Bases: PydanticOutputFunctionsParser Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param args_only: bool = True¶ param attr_name: str [Required]¶ param pydantic_schema: Union[Type[pydantic.main.BaseModel], Dict[str, Type[pydantic.main.BaseModel]]] [Required]¶ parse_result(result: List[Generation]) → Any[source]¶ Parse a list of candidate model Generations into a specific format. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ validator validate_schema  »  all fields¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Create a new model by parsing and validating input data from keyword arguments.
0a4f91a5-48a5-4c17-8129-4a04ac02f7ea
[ "langchain.schema.BaseOutputParser" ]
langchain.output_parsers.boolean.BooleanOutputParser
Class
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.boolean.BooleanOutputParser.html#langchain.output_parsers.boolean.BooleanOutputParser
class BooleanOutputParser(BaseOutputParser[bool]): true_val: str = "YES" false_val: str = "NO" def parse(self, text: str) -> bool: """Parse the output of an LLM call to a boolean. Args: text: output of language model Returns: boolean """ cleaned_text = text.strip() if cleaned_text.upper() not in (self.true_val.upper(), self.false_val.upper()): raise ValueError( f"BooleanOutputParser expected output value to either be " f"{self.true_val} or {self.false_val}. Received {cleaned_text}." ) return cleaned_text.upper() == self.true_val.upper() @property def _type(self) -> str: """Snake-case string identifier for output parser type.""" return "boolean_output_parser"
langchain.output_parsers.boolean.BooleanOutputParser¶ class langchain.output_parsers.boolean.BooleanOutputParser(*, true_val: str = 'YES', false_val: str = 'NO')[source]¶ Bases: BaseOutputParser[bool] Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param false_val: str = 'NO'¶ param true_val: str = 'YES'¶ dict(**kwargs: Any) → Dict¶ Return dictionary representation of output parser. get_format_instructions() → str¶ Instructions on how the LLM output should be formatted. parse(text: str) → bool[source]¶ Parse the output of an LLM call to a boolean. Parameters text – output of language model Returns boolean parse_result(result: List[Generation]) → T¶ Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, which is assumed to be the highest-likelihood Generation. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. parse_with_prompt(completion: str, prompt: PromptValue) → Any¶ Parse the output of an LLM call with the input prompt for context. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. Parameters completion – String output of language model. prompt – Input PromptValue. Returns Structured output to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Create a new model by parsing and validating input data from keyword arguments.
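This parser needs no LLM at all; a quick runnable check of the matching rules:

from langchain.output_parsers.boolean import BooleanOutputParser

parser = BooleanOutputParser()           # defaults: true_val="YES", false_val="NO"
assert parser.parse("YES") is True
assert parser.parse(" no ") is False     # input is stripped and compared case-insensitively
# parser.parse("maybe") raises ValueError naming the two accepted values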
3020aea8-fa63-4953-b110-c31577f99c04
[ "__future__.annotations", "typing.TypeVar", "langchain.chains.llm.LLMChain", "langchain.prompts.prompt.PromptTemplate", "langchain.schema.BaseOutputParser", "langchain.schema.BasePromptTemplate", "langchain.schema.OutputParserException", "langchain.schema.PromptValue", "langchain.schema.language_model.BaseLanguageModel" ]
langchain.output_parsers.retry.RetryOutputParser
Class
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.retry.RetryOutputParser.html#langchain.output_parsers.retry.RetryOutputParser
class RetryOutputParser(BaseOutputParser[T]): """Wraps a parser and tries to fix parsing errors. Does this by passing the original prompt and the completion to another LLM, and telling it the completion did not satisfy criteria in the prompt. """ parser: BaseOutputParser[T] retry_chain: LLMChain @classmethod def from_llm( cls, llm: BaseLanguageModel, parser: BaseOutputParser[T], prompt: BasePromptTemplate = NAIVE_RETRY_PROMPT, ) -> RetryOutputParser[T]: chain = LLMChain(llm=llm, prompt=prompt) return cls(parser=parser, retry_chain=chain) def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T: try: parsed_completion = self.parser.parse(completion) except OutputParserException: new_completion = self.retry_chain.run( prompt=prompt_value.to_string(), completion=completion ) parsed_completion = self.parser.parse(new_completion) return parsed_completion def parse(self, completion: str) -> T: raise NotImplementedError( "This OutputParser can only be called by the `parse_with_prompt` method." ) def get_format_instructions(self) -> str: return self.parser.get_format_instructions() @property def _type(self) -> str: return "retry"
langchain.output_parsers.retry.RetryOutputParser¶ class langchain.output_parsers.retry.RetryOutputParser(*, parser: BaseOutputParser[T], retry_chain: LLMChain)[source]¶ Bases: BaseOutputParser[T] Wraps a parser and tries to fix parsing errors. Does this by passing the original prompt and the completion to another LLM, and telling it the completion did not satisfy criteria in the prompt. Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param parser: langchain.schema.output_parser.BaseOutputParser[langchain.output_parsers.retry.T] [Required]¶ param retry_chain: langchain.chains.llm.LLMChain [Required]¶ dict(**kwargs: Any) → Dict¶ Return dictionary representation of output parser. classmethod from_llm(llm: BaseLanguageModel, parser: BaseOutputParser[T], prompt: BasePromptTemplate = PromptTemplate(input_variables=['completion', 'prompt'], output_parser=None, partial_variables={}, template='Prompt:\n{prompt}\nCompletion:\n{completion}\n\nAbove, the Completion did not satisfy the constraints given in the Prompt.\nPlease try again:', template_format='f-string', validate_template=True)) → RetryOutputParser[T][source]¶ get_format_instructions() → str[source]¶ Instructions on how the LLM output should be formatted. parse(completion: str) → T[source]¶ Parse a single string model output into some structure. Parameters text – String output of language model. Returns Structured output. parse_result(result: List[Generation]) → T¶ Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, which is assumed to be the highest-likelihood Generation. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. parse_with_prompt(completion: str, prompt_value: PromptValue) → T[source]¶ Parse the output of an LLM call with the input prompt for context. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. Parameters completion – String output of language model. prompt – Input PromptValue. Returns Structured output to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Wraps a parser and tries to fix parsing errors.
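A hedged sketch of the retry flow (assumes an OpenAI key and that langchain.output_parsers.datetime.DatetimeOutputParser is available in this snapshot; it is used here only as a parser whose parse raises OutputParserException on bad input):

from langchain.chat_models import ChatOpenAI              # assumes OPENAI_API_KEY is set
from langchain.output_parsers import RetryOutputParser
from langchain.output_parsers.datetime import DatetimeOutputParser
from langchain.prompts import PromptTemplate

parser = DatetimeOutputParser()
retry_parser = RetryOutputParser.from_llm(llm=ChatOpenAI(temperature=0), parser=parser)

prompt = PromptTemplate.from_template(
    "Answer the question. {format_instructions}\nQuestion: when did the Unix epoch start?"
)
prompt_value = prompt.format_prompt(format_instructions=parser.get_format_instructions())

# "tomorrow" fails to parse, so the retry chain shows the LLM the original
# prompt plus the bad completion and asks for another attempt.
dt = retry_parser.parse_with_prompt("tomorrow", prompt_value)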
4b3fb942-4e3a-4bee-92eb-2937e6b7890d
[ "__future__.annotations", "typing.TypeVar", "langchain.chains.llm.LLMChain", "langchain.prompts.prompt.PromptTemplate", "langchain.schema.BaseOutputParser", "langchain.schema.BasePromptTemplate", "langchain.schema.OutputParserException", "langchain.schema.PromptValue", "langchain.schema.language_model.BaseLanguageModel" ]
langchain.output_parsers.retry.RetryWithErrorOutputParser
Class
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.retry.RetryWithErrorOutputParser.html#langchain.output_parsers.retry.RetryWithErrorOutputParser
class RetryWithErrorOutputParser(BaseOutputParser[T]): """Wraps a parser and tries to fix parsing errors. Does this by passing the original prompt, the completion, AND the error that was raised to another language model and telling it that the completion did not work, and raised the given error. Differs from RetryOutputParser in that this implementation provides the error that was raised back to the LLM, which in theory should give it more information on how to fix it. """ parser: BaseOutputParser[T] retry_chain: LLMChain @classmethod def from_llm( cls, llm: BaseLanguageModel, parser: BaseOutputParser[T], prompt: BasePromptTemplate = NAIVE_RETRY_WITH_ERROR_PROMPT, ) -> RetryWithErrorOutputParser[T]: chain = LLMChain(llm=llm, prompt=prompt) return cls(parser=parser, retry_chain=chain) def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T: try: parsed_completion = self.parser.parse(completion) except OutputParserException as e: new_completion = self.retry_chain.run( prompt=prompt_value.to_string(), completion=completion, error=repr(e) ) parsed_completion = self.parser.parse(new_completion) return parsed_completion def parse(self, completion: str) -> T: raise NotImplementedError( "This OutputParser can only be called by the `parse_with_prompt` method." ) def get_format_instructions(self) -> str: return self.parser.get_format_instructions() @property def _type(self) -> str: return "retry_with_error"
langchain.output_parsers.retry.RetryWithErrorOutputParser¶ class langchain.output_parsers.retry.RetryWithErrorOutputParser(*, parser: BaseOutputParser[T], retry_chain: LLMChain)[source]¶ Bases: BaseOutputParser[T] Wraps a parser and tries to fix parsing errors. Does this by passing the original prompt, the completion, AND the error that was raised to another language model and telling it that the completion did not work, and raised the given error. Differs from RetryOutputParser in that this implementation provides the error that was raised back to the LLM, which in theory should give it more information on how to fix it. Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param parser: langchain.schema.output_parser.BaseOutputParser[langchain.output_parsers.retry.T] [Required]¶ param retry_chain: langchain.chains.llm.LLMChain [Required]¶ dict(**kwargs: Any) → Dict¶ Return dictionary representation of output parser. classmethod from_llm(llm: BaseLanguageModel, parser: BaseOutputParser[T], prompt: BasePromptTemplate = PromptTemplate(input_variables=['completion', 'error', 'prompt'], output_parser=None, partial_variables={}, template='Prompt:\n{prompt}\nCompletion:\n{completion}\n\nAbove, the Completion did not satisfy the constraints given in the Prompt.\nDetails: {error}\nPlease try again:', template_format='f-string', validate_template=True)) → RetryWithErrorOutputParser[T][source]¶ get_format_instructions() → str[source]¶ Instructions on how the LLM output should be formatted. parse(completion: str) → T[source]¶ Parse a single string model output into some structure. Parameters text – String output of language model. Returns Structured output. parse_result(result: List[Generation]) → T¶ Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, which is assumed to be the highest-likelihood Generation. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. parse_with_prompt(completion: str, prompt_value: PromptValue) → T[source]¶ Parse the output of an LLM call with the input prompt for context. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. Parameters completion – String output of language model. prompt – Input PromptValue. Returns Structured output to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Wraps a parser and tries to fix parsing errors.
3eded9c7-13b4-4b22-906e-3bec3569103a
[ "__future__.annotations", "json", "re", "typing.List", "langchain.schema.OutputParserException" ]
langchain.output_parsers.json.parse_json_markdown
Function
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.json.parse_json_markdown.html#langchain.output_parsers.json.parse_json_markdown
def parse_json_markdown(json_string: str) -> dict: """ Parse a JSON string from a Markdown string. Args: json_string: The Markdown string. Returns: The parsed JSON object as a Python dictionary. """ # Try to find JSON string within triple backticks match = re.search(r"```(json)?(.*?)```", json_string, re.DOTALL) # If no match found, assume the entire string is a JSON string if match is None: json_str = json_string else: # If match found, use the content within the backticks json_str = match.group(2) # Strip whitespace and newlines from the start and end json_str = json_str.strip() # Parse the JSON string into a Python dictionary parsed = json.loads(json_str) return parsed
langchain.output_parsers.json.parse_json_markdown¶ langchain.output_parsers.json.parse_json_markdown(json_string: str) → dict[source]¶ Parse a JSON string from a Markdown string. Parameters json_string – The Markdown string. Returns The parsed JSON object as a Python dictionary.
Parse a JSON string from a Markdown string.
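Runnable as-is; both the fenced and the bare-string paths of the regex above:

from langchain.output_parsers.json import parse_json_markdown

text = 'Here is the result:\n```json\n{"name": "Ada", "score": 42}\n```'
print(parse_json_markdown(text))            # {'name': 'Ada', 'score': 42}
print(parse_json_markdown('{"ok": true}'))  # a bare JSON string (no fence) is accepted too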
ef6799dc-5eda-458a-ba62-e29631ee1fca
[ "__future__.annotations", "json", "re", "typing.List", "langchain.schema.OutputParserException" ]
langchain.output_parsers.json.parse_and_check_json_markdown
Function
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.json.parse_and_check_json_markdown.html#langchain.output_parsers.json.parse_and_check_json_markdown
def parse_and_check_json_markdown(text: str, expected_keys: List[str]) -> dict: """ Parse a JSON string from a Markdown string and check that it contains the expected keys. Args: text: The Markdown string. expected_keys: The expected keys in the JSON string. Returns: The parsed JSON object as a Python dictionary. """ try: json_obj = parse_json_markdown(text) except json.JSONDecodeError as e: raise OutputParserException(f"Got invalid JSON object. Error: {e}") for key in expected_keys: if key not in json_obj: raise OutputParserException( f"Got invalid return object. Expected key `{key}` " f"to be present, but got {json_obj}" ) return json_obj
langchain.output_parsers.json.parse_and_check_json_markdown¶ langchain.output_parsers.json.parse_and_check_json_markdown(text: str, expected_keys: List[str]) → dict[source]¶ Parse a JSON string from a Markdown string and check that it contains the expected keys. Parameters text – The Markdown string. expected_keys – The expected keys in the JSON string. Returns The parsed JSON object as a Python dictionary.
Parse a JSON string from a Markdown string and check that it contains the expected keys.
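Runnable as-is; the success and missing-key paths:

from langchain.output_parsers.json import parse_and_check_json_markdown
from langchain.schema import OutputParserException

obj = parse_and_check_json_markdown(
    '{"action": "search", "input": "cats"}', expected_keys=["action", "input"]
)
try:
    parse_and_check_json_markdown('{"action": "search"}', ["action", "input"])
except OutputParserException as err:
    print(err)  # complains that key `input` is missing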
cabb02f2-7312-4a10-a201-5250c4fe0f95
[ "enum.Enum", "typing.Any", "typing.Dict", "typing.List", "typing.Type", "pydantic.root_validator", "langchain.schema.BaseOutputParser", "langchain.schema.OutputParserException" ]
langchain.output_parsers.enum.EnumOutputParser
Class
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.enum.EnumOutputParser.html#langchain.output_parsers.enum.EnumOutputParser
class EnumOutputParser(BaseOutputParser): enum: Type[Enum] # Note: despite its name, this validator checks that every enum value is a # string; it does not raise a deprecation warning. @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: enum = values["enum"] if not all(isinstance(e.value, str) for e in enum): raise ValueError("Enum values must be strings") return values @property def _valid_values(self) -> List[str]: return [e.value for e in self.enum] def parse(self, response: str) -> Any: try: return self.enum(response.strip()) except ValueError: raise OutputParserException( f"Response '{response}' is not one of the " f"expected values: {self._valid_values}" ) def get_format_instructions(self) -> str: return f"Select one of the following options: {', '.join(self._valid_values)}"
langchain.output_parsers.enum.EnumOutputParser¶ class langchain.output_parsers.enum.EnumOutputParser(*, enum: Type[Enum])[source]¶ Bases: BaseOutputParser Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param enum: Type[enum.Enum] [Required]¶ dict(**kwargs: Any) → Dict¶ Return dictionary representation of output parser. get_format_instructions() → str[source]¶ Instructions on how the LLM output should be formatted. parse(response: str) → Any[source]¶ Parse a single string model output into some structure. Parameters text – String output of language model. Returns Structured output. parse_result(result: List[Generation]) → T¶ Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, which is assumed to be the highest-likelihood Generation. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. parse_with_prompt(completion: str, prompt: PromptValue) → Any¶ Parse the output of an LLM call with the input prompt for context. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. Parameters completion – String output of language model. prompt – Input PromptValue. Returns Structured output validator raise_deprecation  »  all fields[source]¶ to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Create a new model by parsing and validating input data from keyword arguments.
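Runnable as-is (the Color enum is illustrative):

from enum import Enum

from langchain.output_parsers.enum import EnumOutputParser


class Color(str, Enum):  # values must be strings or the validator rejects the enum
    RED = "red"
    BLUE = "blue"


parser = EnumOutputParser(enum=Color)
print(parser.get_format_instructions())   # Select one of the following options: red, blue
assert parser.parse(" red ") is Color.RED  # whitespace is stripped before matching
# parser.parse("green") raises OutputParserException listing the valid values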
9a758ecb-c5cf-4963-9f02-77a19a8856a6
[ "__future__.annotations", "abc.abstractmethod", "typing.List", "langchain.schema.BaseOutputParser" ]
langchain.output_parsers.list.ListOutputParser
Class
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.list.ListOutputParser.html#langchain.output_parsers.list.ListOutputParser
class ListOutputParser(BaseOutputParser): """Class to parse the output of an LLM call to a list.""" @property def _type(self) -> str: return "list" @abstractmethod def parse(self, text: str) -> List[str]: """Parse the output of an LLM call."""
langchain.output_parsers.list.ListOutputParser¶ class langchain.output_parsers.list.ListOutputParser[source]¶ Bases: BaseOutputParser Class to parse the output of an LLM call to a list. Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. dict(**kwargs: Any) → Dict¶ Return dictionary representation of output parser. get_format_instructions() → str¶ Instructions on how the LLM output should be formatted. abstract parse(text: str) → List[str][source]¶ Parse the output of an LLM call. parse_result(result: List[Generation]) → T¶ Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, which is assumed to be the highest-likelihood Generation. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. parse_with_prompt(completion: str, prompt: PromptValue) → Any¶ Parse the output of an LLM call with the input prompt for context. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. Parameters completion – String output of language model. prompt – Input PromptValue. Returns Structured output to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Class to parse the output of an LLM call to a list.
6b900cff-7051-41fc-a6e5-cb86bce03422
[ "__future__.annotations", "abc.abstractmethod", "typing.List", "langchain.schema.BaseOutputParser" ]
langchain.output_parsers.list.CommaSeparatedListOutputParser
Class
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.list.CommaSeparatedListOutputParser.html#langchain.output_parsers.list.CommaSeparatedListOutputParser
class CommaSeparatedListOutputParser(ListOutputParser): """Parse out comma separated lists.""" @property def lc_serializable(self) -> bool: return True def get_format_instructions(self) -> str: return ( "Your response should be a list of comma separated values, " "eg: `foo, bar, baz`" ) def parse(self, text: str) -> List[str]: """Parse the output of an LLM call.""" return text.strip().split(", ")
langchain.output_parsers.list.CommaSeparatedListOutputParser¶ class langchain.output_parsers.list.CommaSeparatedListOutputParser[source]¶ Bases: ListOutputParser Parse out comma separated lists. Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. dict(**kwargs: Any) → Dict¶ Return dictionary representation of output parser. get_format_instructions() → str[source]¶ Instructions on how the LLM output should be formatted. parse(text: str) → List[str][source]¶ Parse the output of an LLM call. parse_result(result: List[Generation]) → T¶ Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, which is assumed to be the highest-likelihood Generation. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. parse_with_prompt(completion: str, prompt: PromptValue) → Any¶ Parse the output of an LLM call with the input prompt for context. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. Parameters completion – String output of language model. prompt – Input PromptValue. Returns Structured output to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Parse out comma separated lists.
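Runnable as-is:

from langchain.output_parsers.list import CommaSeparatedListOutputParser

parser = CommaSeparatedListOutputParser()
print(parser.parse("foo, bar, baz"))  # ['foo', 'bar', 'baz']
# Note the split is on the literal ", ": "foo,bar" comes back as ['foo,bar'].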
fe0c08a4-b898-4c53-8c68-b1753a0e5abc
[ "__future__.annotations", "re", "typing.Dict", "typing.List", "typing.Optional", "langchain.schema.BaseOutputParser" ]
langchain.output_parsers.regex.RegexParser
Class
https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.regex.RegexParser.html#langchain.output_parsers.regex.RegexParser
class RegexParser(BaseOutputParser): """Class to parse the output into a dictionary.""" @property def lc_serializable(self) -> bool: return True regex: str output_keys: List[str] default_output_key: Optional[str] = None @property def _type(self) -> str: """Return the type key.""" return "regex_parser" def parse(self, text: str) -> Dict[str, str]: """Parse the output of an LLM call.""" match = re.search(self.regex, text) if match: return {key: match.group(i + 1) for i, key in enumerate(self.output_keys)} else: if self.default_output_key is None: raise ValueError(f"Could not parse output: {text}") else: return { key: text if key == self.default_output_key else "" for key in self.output_keys }
langchain.output_parsers.regex.RegexParser¶ class langchain.output_parsers.regex.RegexParser(*, regex: str, output_keys: List[str], default_output_key: Optional[str] = None)[source]¶ Bases: BaseOutputParser Class to parse the output into a dictionary. Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param default_output_key: Optional[str] = None¶ param output_keys: List[str] [Required]¶ param regex: str [Required]¶ dict(**kwargs: Any) → Dict¶ Return dictionary representation of output parser. get_format_instructions() → str¶ Instructions on how the LLM output should be formatted. parse(text: str) → Dict[str, str][source]¶ Parse the output of an LLM call. parse_result(result: List[Generation]) → T¶ Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, which is assumed to be the highest-likelihood Generation. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. parse_with_prompt(completion: str, prompt: PromptValue) → Any¶ Parse the output of an LLM call with the input prompt for context. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. Parameters completion – String output of language model. prompt – Input PromptValue. Returns Structured output to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Class to parse the output into a dictionary.
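Runnable as-is (the regex and keys are illustrative):

from langchain.output_parsers.regex import RegexParser

parser = RegexParser(
    regex=r"Score: (\d+)\nReason: (.*)",  # one capture group per output key
    output_keys=["score", "reason"],
)
print(parser.parse("Score: 8\nReason: concise and correct"))
# {'score': '8', 'reason': 'concise and correct'}

# With default_output_key set, non-matching text is routed to that key
# instead of raising ValueError.
lenient = RegexParser(regex=r"Score: (\d+)", output_keys=["score"], default_output_key="score")
print(lenient.parse("no score here"))  # {'score': 'no score here'}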
69775681-c202-42cd-b447-f4fcac14b845
[ "typing.Any", "typing.Dict", "typing.List", "typing.Optional", "pydantic.Field", "langchain.callbacks.manager.CallbackManagerForChainRun", "langchain.chains.base.Chain", "langchain.experimental.plan_and_execute.executors.base.BaseExecutor", "langchain.experimental.plan_and_execute.planners.base.BasePlanner", "langchain.experimental.plan_and_execute.schema.BaseStepContainer", "langchain.experimental.plan_and_execute.schema.ListStepContainer" ]
langchain.experimental.plan_and_execute.agent_executor.PlanAndExecute
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.plan_and_execute.agent_executor.PlanAndExecute.html#langchain.experimental.plan_and_execute.agent_executor.PlanAndExecute
class PlanAndExecute(Chain): """Chain that first plans a full sequence of steps, then executes them one by one.""" planner: BasePlanner executor: BaseExecutor step_container: BaseStepContainer = Field(default_factory=ListStepContainer) input_key: str = "input" output_key: str = "output" @property def input_keys(self) -> List[str]: return [self.input_key] @property def output_keys(self) -> List[str]: return [self.output_key] def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: # Plan once up front, then execute each step in order. plan = self.planner.plan( inputs, callbacks=run_manager.get_child() if run_manager else None, ) if run_manager: run_manager.on_text(str(plan), verbose=self.verbose) for step in plan.steps: # Each step sees the original objective plus all previous steps. _new_inputs = { "previous_steps": self.step_container, "current_step": step, "objective": inputs[self.input_key], } new_inputs = {**_new_inputs, **inputs} response = self.executor.step( new_inputs, callbacks=run_manager.get_child() if run_manager else None, ) if run_manager: run_manager.on_text( f"*****\n\nStep: {step.value}", verbose=self.verbose ) run_manager.on_text( f"\n\nResponse: {response.response}", verbose=self.verbose ) self.step_container.add_step(step, response) return {self.output_key: self.step_container.get_final_response()}
langchain.experimental.plan_and_execute.agent_executor.PlanAndExecute¶ class langchain.experimental.plan_and_execute.agent_executor.PlanAndExecute(*, memory: Optional[BaseMemory] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, verbose: bool = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, planner: BasePlanner, executor: BaseExecutor, step_container: BaseStepContainer = None, input_key: str = 'input', output_key: str = 'output')[source]¶ Bases: Chain Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param callback_manager: Optional[BaseCallbackManager] = None¶ Deprecated, use callbacks instead. param callbacks: Callbacks = None¶ Optional list of callback handlers (or callback manager). Defaults to None. Callback handlers are called throughout the lifecycle of a call to a chain, starting with on_chain_start, ending with on_chain_end or on_chain_error. Each custom chain can optionally call additional callback methods, see Callback docs for full details. param executor: langchain.experimental.plan_and_execute.executors.base.BaseExecutor [Required]¶ param input_key: str = 'input'¶ param memory: Optional[BaseMemory] = None¶ Optional memory object. Defaults to None. Memory is a class that gets called at the start and at the end of every chain. At the start, memory loads variables and passes them along in the chain. At the end, it saves any returned variables. There are many different types of memory - please see memory docs for the full catalog. param metadata: Optional[Dict[str, Any]] = None¶ Optional metadata associated with the chain. Defaults to None This metadata will be associated with each call to this chain, and passed as arguments to the handlers defined in callbacks. You can use these to eg identify a specific instance of a chain with its use case. param output_key: str = 'output'¶ param planner: langchain.experimental.plan_and_execute.planners.base.BasePlanner [Required]¶ param step_container: langchain.experimental.plan_and_execute.schema.BaseStepContainer [Optional]¶ param tags: Optional[List[str]] = None¶ Optional list of tags associated with the chain. Defaults to None These tags will be associated with each call to this chain, and passed as arguments to the handlers defined in callbacks. You can use these to eg identify a specific instance of a chain with its use case. param verbose: bool [Optional]¶ Whether or not run in verbose mode. In verbose mode, some intermediate logs will be printed to the console. Defaults to langchain.verbose value. __call__(inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, include_run_info: bool = False) → Dict[str, Any]¶ Execute the chain. Parameters inputs – Dictionary of inputs, or single input if chain expects only one param. Should contain all inputs specified in Chain.input_keys except for inputs that will be set by the chain’s memory. return_only_outputs – Whether to return only outputs in the response. If True, only new keys generated by this chain will be returned. If False, both input keys and new keys generated by this chain will be returned. Defaults to False. callbacks – Callbacks to use for this chain run. 
These will be called in addition to callbacks passed to the chain during construction, but only these runtime callbacks will propagate to calls to other objects. tags – List of string tags to pass to all callbacks. These will be passed in addition to tags passed to the chain during construction, but only these runtime tags will propagate to calls to other objects. metadata – Optional metadata associated with the chain. Defaults to None include_run_info – Whether to include run info in the response. Defaults to False. Returns A dict of named outputs. Should contain all outputs specified in Chain.output_keys. async acall(inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, include_run_info: bool = False) → Dict[str, Any]¶ Asynchronously execute the chain. Parameters inputs – Dictionary of inputs, or single input if chain expects only one param. Should contain all inputs specified in Chain.input_keys except for inputs that will be set by the chain’s memory. return_only_outputs – Whether to return only outputs in the response. If True, only new keys generated by this chain will be returned. If False, both input keys and new keys generated by this chain will be returned. Defaults to False. callbacks – Callbacks to use for this chain run. These will be called in addition to callbacks passed to the chain during construction, but only these runtime callbacks will propagate to calls to other objects. tags – List of string tags to pass to all callbacks. These will be passed in addition to tags passed to the chain during construction, but only these runtime tags will propagate to calls to other objects. metadata – Optional metadata associated with the chain. Defaults to None include_run_info – Whether to include run info in the response. Defaults to False. Returns A dict of named outputs. Should contain all outputs specified in Chain.output_keys. apply(input_list: List[Dict[str, Any]], callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None) → List[Dict[str, str]]¶ Call the chain on all inputs in the list. async arun(*args: Any, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any) → str¶ Convenience method for executing chain when there’s a single string output. The main difference between this method and Chain.__call__ is that this method can only be used for chains that return a single string output. If a Chain has more outputs, a non-string output, or you want to return the inputs/run info along with the outputs, use Chain.__call__. The other difference is that this method expects inputs to be passed directly in as positional arguments or keyword arguments, whereas Chain.__call__ expects a single input dictionary with all the inputs. Parameters *args – If the chain expects a single input, it can be passed in as the sole positional argument. callbacks – Callbacks to use for this chain run. These will be called in addition to callbacks passed to the chain during construction, but only these runtime callbacks will propagate to calls to other objects. tags – List of string tags to pass to all callbacks. These will be passed in addition to tags passed to the chain during construction, but only these runtime tags will propagate to calls to other objects.
**kwargs – If the chain expects multiple inputs, they can be passed in directly as keyword arguments. Returns The chain output as a string. Example # Suppose we have a single-input chain that takes a 'question' string: await chain.arun("What's the temperature in Boise, Idaho?") # -> "The temperature in Boise is..." # Suppose we have a multi-input chain that takes a 'question' string # and 'context' string: question = "What's the temperature in Boise, Idaho?" context = "Weather report for Boise, Idaho on 07/03/23..." await chain.arun(question=question, context=context) # -> "The temperature in Boise is..." dict(**kwargs: Any) → Dict¶ Return dictionary representation of chain. Expects Chain._chain_type property to be implemented and for memory to be null. Parameters **kwargs – Keyword arguments passed to default pydantic.BaseModel.dict method. Returns A dictionary representation of the chain. Example chain.dict(exclude_unset=True) # -> {“_type”: “foo”, “verbose”: False, …} prep_inputs(inputs: Union[Dict[str, Any], Any]) → Dict[str, str]¶ Validate and prepare chain inputs, including adding inputs from memory. Parameters inputs – Dictionary of raw inputs, or single input if chain expects only one param. Should contain all inputs specified in Chain.input_keys except for inputs that will be set by the chain’s memory. Returns A dictionary of all inputs, including those added by the chain’s memory. prep_outputs(inputs: Dict[str, str], outputs: Dict[str, str], return_only_outputs: bool = False) → Dict[str, str]¶ Validate and prepare chain outputs, and save info about this run to memory. Parameters inputs – Dictionary of chain inputs, including any inputs added by chain memory. outputs – Dictionary of initial chain outputs. return_only_outputs – Whether to only return the chain outputs. If False, inputs are also added to the final outputs. Returns A dict of the final chain outputs. validator raise_callback_manager_deprecation  »  all fields¶ Raise deprecation warning if callback_manager is used. run(*args: Any, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any) → str¶ Convenience method for executing chain when there’s a single string output. The main difference between this method and Chain.__call__ is that this method can only be used for chains that return a single string output. If a Chain has more outputs, a non-string output, or you want to return the inputs/run info along with the outputs, use Chain.__call__. The other difference is that this method expects inputs to be passed directly in as positional arguments or keyword arguments, whereas Chain.__call__ expects a single input dictionary with all the inputs. Parameters *args – If the chain expects a single input, it can be passed in as the sole positional argument. callbacks – Callbacks to use for this chain run. These will be called in addition to callbacks passed to the chain during construction, but only these runtime callbacks will propagate to calls to other objects. tags – List of string tags to pass to all callbacks. These will be passed in addition to tags passed to the chain during construction, but only these runtime tags will propagate to calls to other objects. **kwargs – If the chain expects multiple inputs, they can be passed in directly as keyword arguments. Returns The chain output as a string.
Example # Suppose we have a single-input chain that takes a 'question' string: chain.run("What's the temperature in Boise, Idaho?") # -> "The temperature in Boise is..." # Suppose we have a multi-input chain that takes a 'question' string # and 'context' string: question = "What's the temperature in Boise, Idaho?" context = "Weather report for Boise, Idaho on 07/03/23..." chain.run(question=question, context=context) # -> "The temperature in Boise is..." save(file_path: Union[Path, str]) → None¶ Save the chain. Expects Chain._chain_type property to be implemented and for memory to be null. Parameters file_path – Path to file to save the chain to. Example chain.save(file_path="path/chain.yaml") validator set_verbose  »  verbose¶ Set the chain verbosity. Defaults to the global setting if not specified by the user. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property input_keys: List[str]¶ Return the keys expected to be in the chain input. property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. property output_keys: List[str]¶ Return the keys expected to be in the chain output. model Config¶ Bases: object Configuration for this pydantic object. arbitrary_types_allowed = True¶
Create a new model by parsing and validating input data from keyword arguments.
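A hedged wiring sketch (assumes the load_chat_planner / load_agent_executor helpers from the same langchain.experimental.plan_and_execute package, a toy Tool, and an OpenAI key):

from langchain.agents.tools import Tool
from langchain.chat_models import ChatOpenAI  # assumes OPENAI_API_KEY is set
from langchain.experimental.plan_and_execute import (
    PlanAndExecute,
    load_agent_executor,
    load_chat_planner,
)

model = ChatOpenAI(temperature=0)
tools = [
    Tool(
        name="Word-Length",
        func=lambda word: str(len(word)),
        description="Returns the length of a word.",
    )
]
planner = load_chat_planner(model)                          # produces the Plan (list of Steps)
executor = load_agent_executor(model, tools, verbose=True)  # runs each Step
agent = PlanAndExecute(planner=planner, executor=executor, verbose=True)
print(agent.run("How many letters are in the word 'strawberry'?"))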
3cf9fe6d-8fbb-406b-bf8c-429f8b593028
[ "abc.abstractmethod", "typing.List", "typing.Tuple", "pydantic.BaseModel", "pydantic.Field", "langchain.schema.BaseOutputParser" ]
langchain.experimental.plan_and_execute.schema.Step
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.plan_and_execute.schema.Step.html#langchain.experimental.plan_and_execute.schema.Step
class Step(BaseModel): value: str
langchain.experimental.plan_and_execute.schema.Step¶ class langchain.experimental.plan_and_execute.schema.Step(*, value: str)[source]¶ Bases: BaseModel Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param value: str [Required]¶
Create a new model by parsing and validating input data from keyword arguments.
e4ef4d2e-4ab6-44b0-90c0-662f5dd9c797
[ "abc.abstractmethod", "typing.List", "typing.Tuple", "pydantic.BaseModel", "pydantic.Field", "langchain.schema.BaseOutputParser" ]
langchain.experimental.plan_and_execute.schema.Plan
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.plan_and_execute.schema.Plan.html#langchain.experimental.plan_and_execute.schema.Plan
class Plan(BaseModel):
    steps: List[Step]
langchain.experimental.plan_and_execute.schema.Plan¶ class langchain.experimental.plan_and_execute.schema.Plan(*, steps: List[Step])[source]¶ Bases: BaseModel Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param steps: List[langchain.experimental.plan_and_execute.schema.Step] [Required]¶
Create a new model by parsing and validating input data from keyword arguments.
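Step and Plan are the plain pydantic data layer of the plan-and-execute flow. A minimal sketch of building and inspecting a plan by hand (the step texts are illustrative):

from langchain.experimental.plan_and_execute.schema import Plan, Step

# A plan is just an ordered list of steps.
plan = Plan(
    steps=[
        Step(value="Find the current weather in Boise."),
        Step(value="Summarize the result for the user."),
    ]
)

for i, step in enumerate(plan.steps, start=1):
    print(f"{i}. {step.value}")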
0377a3b9-fd37-442e-813c-66a8b6f2a9a0
[ "abc.abstractmethod", "typing.List", "typing.Tuple", "pydantic.BaseModel", "pydantic.Field", "langchain.schema.BaseOutputParser" ]
langchain.experimental.plan_and_execute.schema.StepResponse
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.plan_and_execute.schema.StepResponse.html#langchain.experimental.plan_and_execute.schema.StepResponse
class StepResponse(BaseModel):
    response: str
langchain.experimental.plan_and_execute.schema.StepResponse¶ class langchain.experimental.plan_and_execute.schema.StepResponse(*, response: str)[source]¶ Bases: BaseModel Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param response: str [Required]¶
Create a new model by parsing and validating input data from keyword arguments.
69d2894e-985e-4c89-ba57-846aa3e34a73
[ "abc.abstractmethod", "typing.List", "typing.Tuple", "pydantic.BaseModel", "pydantic.Field", "langchain.schema.BaseOutputParser" ]
langchain.experimental.plan_and_execute.schema.BaseStepContainer
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.plan_and_execute.schema.BaseStepContainer.html#langchain.experimental.plan_and_execute.schema.BaseStepContainer
class BaseStepContainer(BaseModel):
    @abstractmethod
    def add_step(self, step: Step, step_response: StepResponse) -> None:
        """Add step and step response to the container."""

    @abstractmethod
    def get_final_response(self) -> str:
        """Return the final response based on steps taken."""
langchain.experimental.plan_and_execute.schema.BaseStepContainer¶ class langchain.experimental.plan_and_execute.schema.BaseStepContainer[source]¶ Bases: BaseModel Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. abstract add_step(step: Step, step_response: StepResponse) → None[source]¶ Add step and step response to the container. abstract get_final_response() → str[source]¶ Return the final response based on steps taken.
Create a new model by parsing and validating input data from keyword arguments.
99b8fd01-a937-43a8-bd03-156da161be5a
[ "abc.abstractmethod", "typing.List", "typing.Tuple", "pydantic.BaseModel", "pydantic.Field", "langchain.schema.BaseOutputParser" ]
langchain.experimental.plan_and_execute.schema.ListStepContainer
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.plan_and_execute.schema.ListStepContainer.html#langchain.experimental.plan_and_execute.schema.ListStepContainer
class ListStepContainer(BaseStepContainer):
    steps: List[Tuple[Step, StepResponse]] = Field(default_factory=list)

    def add_step(self, step: Step, step_response: StepResponse) -> None:
        self.steps.append((step, step_response))

    def get_steps(self) -> List[Tuple[Step, StepResponse]]:
        return self.steps

    def get_final_response(self) -> str:
        return self.steps[-1][1].response
langchain.experimental.plan_and_execute.schema.ListStepContainer¶ class langchain.experimental.plan_and_execute.schema.ListStepContainer(*, steps: List[Tuple[Step, StepResponse]] = None)[source]¶ Bases: BaseStepContainer Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param steps: List[Tuple[langchain.experimental.plan_and_execute.schema.Step, langchain.experimental.plan_and_execute.schema.StepResponse]] [Optional]¶ add_step(step: Step, step_response: StepResponse) → None[source]¶ Add step and step response to the container. get_final_response() → str[source]¶ Return the final response based on steps taken. get_steps() → List[Tuple[Step, StepResponse]][source]¶
Create a new model by parsing and validating input data from keyword arguments.
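A short sketch of how ListStepContainer accumulates (Step, StepResponse) pairs during a run; the values below are illustrative:

from langchain.experimental.plan_and_execute.schema import (
    ListStepContainer,
    Step,
    StepResponse,
)

container = ListStepContainer()
container.add_step(Step(value="Look up the forecast."), StepResponse(response="Sunny, 85F."))
container.add_step(Step(value="Answer the question."), StepResponse(response="It will be sunny."))

# The final response is simply the response attached to the last recorded step.
print(container.get_final_response())  # -> "It will be sunny."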
566dc495-a5cb-413d-bc89-e63e61c8b61d
[ "abc.abstractmethod", "typing.List", "typing.Tuple", "pydantic.BaseModel", "pydantic.Field", "langchain.schema.BaseOutputParser" ]
langchain.experimental.plan_and_execute.schema.PlanOutputParser
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.plan_and_execute.schema.PlanOutputParser.html#langchain.experimental.plan_and_execute.schema.PlanOutputParser
class PlanOutputParser(BaseOutputParser):
    @abstractmethod
    def parse(self, text: str) -> Plan:
        """Parse into a plan."""
langchain.experimental.plan_and_execute.schema.PlanOutputParser¶ class langchain.experimental.plan_and_execute.schema.PlanOutputParser[source]¶ Bases: BaseOutputParser Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. dict(**kwargs: Any) → Dict¶ Return dictionary representation of output parser. get_format_instructions() → str¶ Instructions on how the LLM output should be formatted. abstract parse(text: str) → Plan[source]¶ Parse into a plan. parse_result(result: List[Generation]) → T¶ Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, which is assumed to be the highest-likelihood Generation. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. parse_with_prompt(completion: str, prompt: PromptValue) → Any¶ Parse the output of an LLM call with the input prompt for context. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. Parameters completion – String output of language model. prompt – Input PromptValue. Returns Structured output to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Create a new model by parsing and validating input data from keyword arguments.
d2536bc3-6ffc-4f09-b27b-4f3bc76ef45a
[ "typing.List", "langchain.agents.agent.AgentExecutor", "langchain.agents.structured_chat.base.StructuredChatAgent", "langchain.experimental.plan_and_execute.executors.base.ChainExecutor", "langchain.schema.language_model.BaseLanguageModel", "langchain.tools.BaseTool" ]
langchain.experimental.plan_and_execute.executors.agent_executor.load_agent_executor
Function
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.plan_and_execute.executors.agent_executor.load_agent_executor.html#langchain.experimental.plan_and_execute.executors.agent_executor.load_agent_executor
def load_agent_executor(
    llm: BaseLanguageModel,
    tools: List[BaseTool],
    verbose: bool = False,
    include_task_in_prompt: bool = False,
) -> ChainExecutor:
    """
    Load an agent executor.

    Args:
        llm: BaseLanguageModel
        tools: List[BaseTool]
        verbose: bool. Defaults to False.
        include_task_in_prompt: bool. Defaults to False.

    Returns:
        ChainExecutor
    """
    input_variables = ["previous_steps", "current_step", "agent_scratchpad"]
    template = HUMAN_MESSAGE_TEMPLATE

    if include_task_in_prompt:
        input_variables.append("objective")
        template = TASK_PREFIX + template

    agent = StructuredChatAgent.from_llm_and_tools(
        llm,
        tools,
        human_message_template=template,
        input_variables=input_variables,
    )
    agent_executor = AgentExecutor.from_agent_and_tools(
        agent=agent, tools=tools, verbose=verbose
    )
    return ChainExecutor(chain=agent_executor)
langchain.experimental.plan_and_execute.executors.agent_executor.load_agent_executor¶ langchain.experimental.plan_and_execute.executors.agent_executor.load_agent_executor(llm: BaseLanguageModel, tools: List[BaseTool], verbose: bool = False, include_task_in_prompt: bool = False) → ChainExecutor[source]¶ Load an agent executor. Parameters llm – BaseLanguageModel tools – List[BaseTool] verbose – bool. Defaults to False. include_task_in_prompt – bool. Defaults to False. Returns ChainExecutor
Load an agent executor.
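A minimal usage sketch of load_agent_executor, assuming an OpenAI key is configured; the Echo tool is a hypothetical stand-in and the step inputs mirror the prompt variables above:

from langchain.agents import Tool
from langchain.experimental.plan_and_execute import load_agent_executor
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
tools = [
    Tool(
        name="Echo",
        func=lambda q: q,  # hypothetical tool, returns its input unchanged
        description="Useful for repeating text back verbatim.",
    )
]

executor = load_agent_executor(llm, tools, verbose=True)
# With include_task_in_prompt=False the executor expects exactly these two inputs;
# agent_scratchpad is filled in by the agent itself.
result = executor.step({"previous_steps": "", "current_step": "Echo the word hello."})
print(result.response)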
026f931a-e32a-401b-ba97-e7c4dc589143
[ "abc.abstractmethod", "typing.Any", "pydantic.BaseModel", "langchain.callbacks.manager.Callbacks", "langchain.chains.base.Chain", "langchain.experimental.plan_and_execute.schema.StepResponse" ]
langchain.experimental.plan_and_execute.executors.base.BaseExecutor
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.plan_and_execute.executors.base.BaseExecutor.html#langchain.experimental.plan_and_execute.executors.base.BaseExecutor
class BaseExecutor(BaseModel):
    @abstractmethod
    def step(
        self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
    ) -> StepResponse:
        """Take step."""

    @abstractmethod
    async def astep(
        self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
    ) -> StepResponse:
        """Take step."""
langchain.experimental.plan_and_execute.executors.base.BaseExecutor¶ class langchain.experimental.plan_and_execute.executors.base.BaseExecutor[source]¶ Bases: BaseModel Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. abstract async astep(inputs: dict, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → StepResponse[source]¶ Take step. abstract step(inputs: dict, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → StepResponse[source]¶ Take step.
Create a new model by parsing and validating input data from keyword arguments.
37d6690d-daad-4a8d-9a9f-92a998277a32
[ "abc.abstractmethod", "typing.Any", "pydantic.BaseModel", "langchain.callbacks.manager.Callbacks", "langchain.chains.base.Chain", "langchain.experimental.plan_and_execute.schema.StepResponse" ]
langchain.experimental.plan_and_execute.executors.base.ChainExecutor
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.plan_and_execute.executors.base.ChainExecutor.html#langchain.experimental.plan_and_execute.executors.base.ChainExecutor
class ChainExecutor(BaseExecutor):
    chain: Chain

    def step(
        self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
    ) -> StepResponse:
        """Take step."""
        response = self.chain.run(**inputs, callbacks=callbacks)
        return StepResponse(response=response)

    async def astep(
        self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
    ) -> StepResponse:
        """Take step."""
        response = await self.chain.arun(**inputs, callbacks=callbacks)
        return StepResponse(response=response)
langchain.experimental.plan_and_execute.executors.base.ChainExecutor¶ class langchain.experimental.plan_and_execute.executors.base.ChainExecutor(*, chain: Chain)[source]¶ Bases: BaseExecutor Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param chain: langchain.chains.base.Chain [Required]¶ async astep(inputs: dict, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → StepResponse[source]¶ Take step. step(inputs: dict, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → StepResponse[source]¶ Take step.
Create a new model by parsing and validating input data from keyword arguments.
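Because ChainExecutor only requires a Chain whose run() returns a string, any chain can serve as the executor. A minimal sketch wrapping a plain LLMChain (assuming an OpenAI key; the prompt is illustrative):

from langchain.chains import LLMChain
from langchain.experimental.plan_and_execute.executors.base import ChainExecutor
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

prompt = PromptTemplate.from_template("Carry out this step: {current_step}")
executor = ChainExecutor(chain=LLMChain(llm=OpenAI(temperature=0), prompt=prompt))

# step() forwards the inputs to chain.run() and wraps the string in a StepResponse.
step_response = executor.step({"current_step": "Summarize today's weather report."})
print(step_response.response)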
1f5a7d17-a878-47b3-a12a-2eaaae754e61
[ "re", "langchain.chains.LLMChain", "langchain.experimental.plan_and_execute.planners.base.LLMPlanner", "langchain.experimental.plan_and_execute.schema.Plan", "langchain.experimental.plan_and_execute.schema.PlanOutputParser", "langchain.experimental.plan_and_execute.schema.Step", "langchain.prompts.ChatPromptTemplate", "langchain.prompts.HumanMessagePromptTemplate", "langchain.schema.language_model.BaseLanguageModel", "langchain.schema.messages.SystemMessage" ]
langchain.experimental.plan_and_execute.planners.chat_planner.PlanningOutputParser
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.plan_and_execute.planners.chat_planner.PlanningOutputParser.html#langchain.experimental.plan_and_execute.planners.chat_planner.PlanningOutputParser
class PlanningOutputParser(PlanOutputParser):
    def parse(self, text: str) -> Plan:
        steps = [Step(value=v) for v in re.split("\n\s*\d+\. ", text)[1:]]
        return Plan(steps=steps)
langchain.experimental.plan_and_execute.planners.chat_planner.PlanningOutputParser¶ class langchain.experimental.plan_and_execute.planners.chat_planner.PlanningOutputParser[source]¶ Bases: PlanOutputParser Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. dict(**kwargs: Any) → Dict¶ Return dictionary representation of output parser. get_format_instructions() → str¶ Instructions on how the LLM output should be formatted. parse(text: str) → Plan[source]¶ Parse into a plan. parse_result(result: List[Generation]) → T¶ Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, which is assumed to be the highest-likelihood Generation. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. parse_with_prompt(completion: str, prompt: PromptValue) → Any¶ Parse the output of an LLM call with the input prompt for context. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. Parameters completion – String output of language model. prompt – Input PromptValue. Returns Structured output to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Create a new model by parsing and validating input data from keyword arguments.
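The [1:] slice in parse() discards everything before the first numbered item (e.g. a 'Plan:' header), and the '<END_OF_PLAN>' marker is normally removed by the planner's stop sequence before the text reaches the parser. A quick illustration on canned text:

from langchain.experimental.plan_and_execute.planners.chat_planner import (
    PlanningOutputParser,
)

text = "Plan:\n1. Find the forecast for Boise.\n2. Answer the question."
plan = PlanningOutputParser().parse(text)
print([step.value for step in plan.steps])
# -> ['Find the forecast for Boise.', 'Answer the question.']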
f302c018-5605-4bdf-82d8-cd2fe018e702
[ "re", "langchain.chains.LLMChain", "langchain.experimental.plan_and_execute.planners.base.LLMPlanner", "langchain.experimental.plan_and_execute.schema.Plan", "langchain.experimental.plan_and_execute.schema.PlanOutputParser", "langchain.experimental.plan_and_execute.schema.Step", "langchain.prompts.ChatPromptTemplate", "langchain.prompts.HumanMessagePromptTemplate", "langchain.schema.language_model.BaseLanguageModel", "langchain.schema.messages.SystemMessage" ]
langchain.experimental.plan_and_execute.planners.chat_planner.load_chat_planner
Function
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.plan_and_execute.planners.chat_planner.load_chat_planner.html#langchain.experimental.plan_and_execute.planners.chat_planner.load_chat_planner
def load_chat_planner(
    llm: BaseLanguageModel, system_prompt: str = SYSTEM_PROMPT
) -> LLMPlanner:
    """
    Load a chat planner.

    Args:
        llm: Language model.
        system_prompt: System prompt.

    Returns:
        LLMPlanner
    """
    prompt_template = ChatPromptTemplate.from_messages(
        [
            SystemMessage(content=system_prompt),
            HumanMessagePromptTemplate.from_template("{input}"),
        ]
    )
    llm_chain = LLMChain(llm=llm, prompt=prompt_template)
    return LLMPlanner(
        llm_chain=llm_chain,
        output_parser=PlanningOutputParser(),
        stop=["<END_OF_PLAN>"],
    )
langchain.experimental.plan_and_execute.planners.chat_planner.load_chat_planner¶ langchain.experimental.plan_and_execute.planners.chat_planner.load_chat_planner(llm: BaseLanguageModel, system_prompt: str = "Let's first understand the problem and devise a plan to solve the problem. Please output the plan starting with the header 'Plan:' and then followed by a numbered list of steps. Please make the plan the minimum number of steps required to accurately complete the task. If the task is a question, the final step should almost always be 'Given the above steps taken, please respond to the users original question'. At the end of your plan, say '<END_OF_PLAN>'") → LLMPlanner[source]¶ Load a chat planner. Parameters llm – Language model. system_prompt – System prompt. Returns LLMPlanner
Load a chat planner.
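A minimal usage sketch, assuming a chat model and API key are available; the question is illustrative:

from langchain.chat_models import ChatOpenAI
from langchain.experimental.plan_and_execute import load_chat_planner

planner = load_chat_planner(ChatOpenAI(temperature=0))
# The default prompt exposes a single {input} variable.
plan = planner.plan({"input": "What is the population of Boise, doubled?"})
for step in plan.steps:
    print(step.value)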
3b5c0c92-4f34-40aa-baf0-420c8974dc06
[ "abc.abstractmethod", "typing.Any", "typing.List", "typing.Optional", "pydantic.BaseModel", "langchain.callbacks.manager.Callbacks", "langchain.chains.llm.LLMChain", "langchain.experimental.plan_and_execute.schema.Plan", "langchain.experimental.plan_and_execute.schema.PlanOutputParser" ]
langchain.experimental.plan_and_execute.planners.base.BasePlanner
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.plan_and_execute.planners.base.BasePlanner.html#langchain.experimental.plan_and_execute.planners.base.BasePlanner
class BasePlanner(BaseModel):
    @abstractmethod
    def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan:
        """Given input, decide what to do."""

    @abstractmethod
    async def aplan(
        self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
    ) -> Plan:
        """Given input, decide what to do."""
langchain.experimental.plan_and_execute.planners.base.BasePlanner¶ class langchain.experimental.plan_and_execute.planners.base.BasePlanner[source]¶ Bases: BaseModel Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. abstract async aplan(inputs: dict, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → Plan[source]¶ Given input, decide what to do. abstract plan(inputs: dict, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → Plan[source]¶ Given input, decide what to do.
Create a new model by parsing and validating input data from keyword arguments.
63a98624-8aa1-49f5-811c-0ea3d41cf025
[ "abc.abstractmethod", "typing.Any", "typing.List", "typing.Optional", "pydantic.BaseModel", "langchain.callbacks.manager.Callbacks", "langchain.chains.llm.LLMChain", "langchain.experimental.plan_and_execute.schema.Plan", "langchain.experimental.plan_and_execute.schema.PlanOutputParser" ]
langchain.experimental.plan_and_execute.planners.base.LLMPlanner
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.plan_and_execute.planners.base.LLMPlanner.html#langchain.experimental.plan_and_execute.planners.base.LLMPlanner
class LLMPlanner(BasePlanner):
    llm_chain: LLMChain
    output_parser: PlanOutputParser
    stop: Optional[List] = None

    def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan:
        """Given input, decide what to do."""
        llm_response = self.llm_chain.run(**inputs, stop=self.stop, callbacks=callbacks)
        return self.output_parser.parse(llm_response)

    async def aplan(
        self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
    ) -> Plan:
        """Given input, decide what to do."""
        llm_response = await self.llm_chain.arun(
            **inputs, stop=self.stop, callbacks=callbacks
        )
        return self.output_parser.parse(llm_response)
langchain.experimental.plan_and_execute.planners.base.LLMPlanner¶ class langchain.experimental.plan_and_execute.planners.base.LLMPlanner(*, llm_chain: LLMChain, output_parser: PlanOutputParser, stop: Optional[List] = None)[source]¶ Bases: BasePlanner Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param llm_chain: langchain.chains.llm.LLMChain [Required]¶ param output_parser: langchain.experimental.plan_and_execute.schema.PlanOutputParser [Required]¶ param stop: Optional[List] = None¶ async aplan(inputs: dict, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → Plan[source]¶ Given input, decide what to do. plan(inputs: dict, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → Plan[source]¶ Given input, decide what to do.
Create a new model by parsing and validating input data from keyword arguments.
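load_chat_planner shown earlier is a thin convenience wrapper over this class; an equivalent manual construction looks like the sketch below (the system prompt here is an illustrative stand-in for the default):

from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.experimental.plan_and_execute.planners.base import LLMPlanner
from langchain.experimental.plan_and_execute.planners.chat_planner import (
    PlanningOutputParser,
)
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.schema.messages import SystemMessage

prompt = ChatPromptTemplate.from_messages(
    [
        SystemMessage(content="Devise a short numbered plan, then say '<END_OF_PLAN>'."),
        HumanMessagePromptTemplate.from_template("{input}"),
    ]
)
planner = LLMPlanner(
    llm_chain=LLMChain(llm=ChatOpenAI(temperature=0), prompt=prompt),
    output_parser=PlanningOutputParser(),
    stop=["<END_OF_PLAN>"],  # cut generation before the marker reaches the parser
)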
2ca0556f-037f-4f4d-98bf-e69b7d837727
[ "__future__.annotations", "json", "typing.TYPE_CHECKING", "typing.Any", "typing.List", "typing.Optional", "typing.cast", "pydantic.Field", "pydantic.root_validator", "langchain.callbacks.manager.CallbackManagerForLLMRun", "langchain.llms.huggingface_pipeline.HuggingFacePipeline", "jsonformer" ]
langchain.experimental.llms.jsonformer_decoder.import_jsonformer
Function
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.llms.jsonformer_decoder.import_jsonformer.html#langchain.experimental.llms.jsonformer_decoder.import_jsonformer
def import_jsonformer() -> jsonformer:
    """Lazily import jsonformer."""
    try:
        import jsonformer
    except ImportError:
        raise ValueError(
            "Could not import jsonformer python package. "
            "Please install it with `pip install jsonformer`."
        )
    return jsonformer
langchain.experimental.llms.jsonformer_decoder.import_jsonformer¶ langchain.experimental.llms.jsonformer_decoder.import_jsonformer() → jsonformer[source]¶ Lazily import jsonformer.
Lazily import jsonformer.
46520d31-232a-4b48-8454-254e7a83a6e9
[ "__future__.annotations", "json", "typing.TYPE_CHECKING", "typing.Any", "typing.List", "typing.Optional", "typing.cast", "pydantic.Field", "pydantic.root_validator", "langchain.callbacks.manager.CallbackManagerForLLMRun", "langchain.llms.huggingface_pipeline.HuggingFacePipeline", "jsonformer", "jsonformer" ]
langchain.experimental.llms.jsonformer_decoder.JsonFormer
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.llms.jsonformer_decoder.JsonFormer.html#langchain.experimental.llms.jsonformer_decoder.JsonFormer
class JsonFormer(HuggingFacePipeline):
    json_schema: dict = Field(..., description="The JSON Schema to complete.")
    max_new_tokens: int = Field(
        default=200, description="Maximum number of new tokens to generate."
    )
    debug: bool = Field(default=False, description="Debug mode.")

    @root_validator
    def check_jsonformer_installation(cls, values: dict) -> dict:
        import_jsonformer()
        return values

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        jsonformer = import_jsonformer()
        from transformers import Text2TextGenerationPipeline

        pipeline = cast(Text2TextGenerationPipeline, self.pipeline)

        model = jsonformer.Jsonformer(
            model=pipeline.model,
            tokenizer=pipeline.tokenizer,
            json_schema=self.json_schema,
            prompt=prompt,
            max_number_tokens=self.max_new_tokens,
            debug=self.debug,
        )
        text = model()
        return json.dumps(text)
langchain.experimental.llms.jsonformer_decoder.JsonFormer¶ class langchain.experimental.llms.jsonformer_decoder.JsonFormer(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, pipeline: Any = None, model_id: str = 'gpt2', model_kwargs: Optional[dict] = None, pipeline_kwargs: Optional[dict] = None, json_schema: dict, max_new_tokens: int = 200, debug: bool = False)[source]¶ Bases: HuggingFacePipeline Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param cache: Optional[bool] = None¶ param callback_manager: Optional[BaseCallbackManager] = None¶ param callbacks: Callbacks = None¶ param debug: bool = False¶ Debug mode. param json_schema: dict [Required]¶ The JSON Schema to complete. param max_new_tokens: int = 200¶ Maximum number of new tokens to generate. param metadata: Optional[Dict[str, Any]] = None¶ Metadata to add to the run trace. param model_id: str = 'gpt2'¶ Model name to use. param model_kwargs: Optional[dict] = None¶ Key word arguments passed to the model. param pipeline_kwargs: Optional[dict] = None¶ Key word arguments passed to the pipeline. param tags: Optional[List[str]] = None¶ Tags to add to the run trace. param verbose: bool [Optional]¶ Whether to print out response text. __call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any) → str¶ Check Cache and run the LLM on the given prompt and input. async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any) → LLMResult¶ Run the LLM on the given prompt and input. async agenerate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶ Asynchronously pass a sequence of prompts and return model generations. This method should make use of batched calls for models that expose a batched API. Use this method when you want to: take advantage of batched calls, need more output from the model than just the top generated value, are building chains that are agnostic to the underlying language model type (e.g., pure text completion models vs chat models). Parameters prompts – List of PromptValues. A PromptValue is an object that can be converted to match the format of any language model (string for pure text generation models and BaseMessages for chat models). stop – Stop words to use when generating. Model output is cut off at the first occurrence of any of these substrings. callbacks – Callbacks to pass through. Used for executing additional functionality, such as logging or streaming, throughout generation. **kwargs – Arbitrary additional keyword arguments. These are usually passed to the model provider API call. Returns An LLMResult, which contains a list of candidate Generations for each input prompt and additional model provider-specific output. 
async apredict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶ Asynchronously pass a string to the model and return a string prediction. Use this method when calling pure text generation models and only the top candidate generation is needed. Parameters text – String input to pass to the model. stop – Stop words to use when generating. Model output is cut off at the first occurrence of any of these substrings. **kwargs – Arbitrary additional keyword arguments. These are usually passed to the model provider API call. Returns Top model prediction as a string. async apredict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶ Asynchronously pass messages to the model and return a message prediction. Use this method when calling chat models and only the top candidate generation is needed. Parameters messages – A sequence of chat messages corresponding to a single model input. stop – Stop words to use when generating. Model output is cut off at the first occurrence of any of these substrings. **kwargs – Arbitrary additional keyword arguments. These are usually passed to the model provider API call. Returns Top model prediction as a message. validator check_jsonformer_installation  »  all fields[source]¶ dict(**kwargs: Any) → Dict¶ Return a dictionary of the LLM. classmethod from_model_id(model_id: str, task: str, device: int = -1, model_kwargs: Optional[dict] = None, pipeline_kwargs: Optional[dict] = None, **kwargs: Any) → LLM¶ Construct the pipeline object from model_id and task. generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any) → LLMResult¶ Run the LLM on the given prompt and input. generate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶ Pass a sequence of prompts to the model and return model generations. This method should make use of batched calls for models that expose a batched API. Use this method when you want to: take advantage of batched calls, need more output from the model than just the top generated value, are building chains that are agnostic to the underlying language model type (e.g., pure text completion models vs chat models). Parameters prompts – List of PromptValues. A PromptValue is an object that can be converted to match the format of any language model (string for pure text generation models and BaseMessages for chat models). stop – Stop words to use when generating. Model output is cut off at the first occurrence of any of these substrings. callbacks – Callbacks to pass through. Used for executing additional functionality, such as logging or streaming, throughout generation. **kwargs – Arbitrary additional keyword arguments. These are usually passed to the model provider API call. Returns An LLMResult, which contains a list of candidate Generations for each input prompt and additional model provider-specific output. get_num_tokens(text: str) → int¶ Get the number of tokens present in the text. Useful for checking if an input will fit in a model’s context window. Parameters text – The string input to tokenize. Returns The integer number of tokens in the text. get_num_tokens_from_messages(messages: List[BaseMessage]) → int¶ Get the number of tokens in the messages. 
Useful for checking if an input will fit in a model’s context window. Parameters messages – The message inputs to tokenize. Returns The sum of the number of tokens across the messages. get_token_ids(text: str) → List[int]¶ Return the ordered ids of the tokens in a text. Parameters text – The string input to tokenize. Returns A list of ids corresponding to the tokens in the text, in order they occur in the text. predict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶ Pass a single string input to the model and return a string prediction. Use this method when passing in raw text. If you want to pass in specific types of chat messages, use predict_messages. Parameters text – String input to pass to the model. stop – Stop words to use when generating. Model output is cut off at the first occurrence of any of these substrings. **kwargs – Arbitrary additional keyword arguments. These are usually passed to the model provider API call. Returns Top model prediction as a string. predict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶ Pass a message sequence to the model and return a message prediction. Use this method when passing in chat messages. If you want to pass in raw text, use predict. Parameters messages – A sequence of chat messages corresponding to a single model input. stop – Stop words to use when generating. Model output is cut off at the first occurrence of any of these substrings. **kwargs – Arbitrary additional keyword arguments. These are usually passed to the model provider API call. Returns Top model prediction as a message. validator raise_deprecation  »  all fields¶ Raise deprecation warning if callback_manager is used. save(file_path: Union[Path, str]) → None¶ Save the LLM. Parameters file_path – Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path=”path/llm.yaml”) validator set_verbose  »  verbose¶ If verbose is None, set it. This allows users to pass in None as verbose to access the global setting. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object Configuration for this pydantic object. extra = 'forbid'¶
Create a new model by parsing and validating input data from keyword arguments.
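A minimal usage sketch, assuming jsonformer and transformers are installed; gpt2 and the schema below are illustrative choices, not a recommendation:

from transformers import pipeline

from langchain.experimental.llms import JsonFormer

hf_pipeline = pipeline("text-generation", model="gpt2")  # small causal LM for illustration

schema = {
    "type": "object",
    "properties": {
        "city": {"type": "string"},
        "temperature_f": {"type": "number"},
    },
}

llm = JsonFormer(pipeline=hf_pipeline, json_schema=schema)
# _call drives jsonformer token by token, so the output is a JSON string
# constrained to match the schema.
print(llm("Report the current weather for Boise as JSON: "))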
629c05ed-7a9b-4a9f-8f8e-b73ee2fa1f66
[ "__future__.annotations", "typing.TYPE_CHECKING", "typing.Any", "typing.List", "typing.Optional", "typing.cast", "pydantic.Field", "pydantic.root_validator", "langchain.callbacks.manager.CallbackManagerForLLMRun", "langchain.llms.huggingface_pipeline.HuggingFacePipeline", "langchain.llms.utils.enforce_stop_tokens", "rellm", "regex.Pattern", "regex.Pattern" ]
langchain.experimental.llms.rellm_decoder.import_rellm
Function
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.llms.rellm_decoder.import_rellm.html#langchain.experimental.llms.rellm_decoder.import_rellm
def import_rellm() -> rellm:
    """Lazily import rellm."""
    try:
        import rellm
    except ImportError:
        raise ValueError(
            "Could not import rellm python package. "
            "Please install it with `pip install rellm`."
        )
    return rellm
langchain.experimental.llms.rellm_decoder.import_rellm¶ langchain.experimental.llms.rellm_decoder.import_rellm() → rellm[source]¶ Lazily import rellm.
Lazily import rellm.
d328a4e4-40e2-4710-aed8-fed7caedc850
[ "__future__.annotations", "typing.TYPE_CHECKING", "typing.Any", "typing.List", "typing.Optional", "typing.cast", "pydantic.Field", "pydantic.root_validator", "langchain.callbacks.manager.CallbackManagerForLLMRun", "langchain.llms.huggingface_pipeline.HuggingFacePipeline", "langchain.llms.utils.enforce_stop_tokens", "rellm", "regex.Pattern", "regex.Pattern", "rellm" ]
langchain.experimental.llms.rellm_decoder.RELLM
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.llms.rellm_decoder.RELLM.html#langchain.experimental.llms.rellm_decoder.RELLM
class RELLM(HuggingFacePipeline):
    regex: RegexPattern = Field(..., description="The structured format to complete.")
    max_new_tokens: int = Field(
        default=200, description="Maximum number of new tokens to generate."
    )

    @root_validator
    def check_rellm_installation(cls, values: dict) -> dict:
        import_rellm()
        return values

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        rellm = import_rellm()
        from transformers import Text2TextGenerationPipeline

        pipeline = cast(Text2TextGenerationPipeline, self.pipeline)

        text = rellm.complete_re(
            prompt,
            self.regex,
            tokenizer=pipeline.tokenizer,
            model=pipeline.model,
            max_new_tokens=self.max_new_tokens,
        )
        if stop is not None:
            # This is a bit hacky, but I can't figure out a better way to enforce
            # stop tokens when making calls to huggingface_hub.
            text = enforce_stop_tokens(text, stop)
        return text
langchain.experimental.llms.rellm_decoder.RELLM¶ class langchain.experimental.llms.rellm_decoder.RELLM(*, cache: Optional[bool] = None, verbose: bool = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, pipeline: Any = None, model_id: str = 'gpt2', model_kwargs: Optional[dict] = None, pipeline_kwargs: Optional[dict] = None, regex: RegexPattern, max_new_tokens: int = 200)[source]¶ Bases: HuggingFacePipeline Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param cache: Optional[bool] = None¶ param callback_manager: Optional[BaseCallbackManager] = None¶ param callbacks: Callbacks = None¶ param max_new_tokens: int = 200¶ Maximum number of new tokens to generate. param metadata: Optional[Dict[str, Any]] = None¶ Metadata to add to the run trace. param model_id: str = 'gpt2'¶ Model name to use. param model_kwargs: Optional[dict] = None¶ Key word arguments passed to the model. param pipeline_kwargs: Optional[dict] = None¶ Key word arguments passed to the pipeline. param regex: RegexPattern [Required]¶ The structured format to complete. param tags: Optional[List[str]] = None¶ Tags to add to the run trace. param verbose: bool [Optional]¶ Whether to print out response text. __call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any) → str¶ Check Cache and run the LLM on the given prompt and input. async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any) → LLMResult¶ Run the LLM on the given prompt and input. async agenerate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶ Asynchronously pass a sequence of prompts and return model generations. This method should make use of batched calls for models that expose a batched API. Use this method when you want to: take advantage of batched calls, need more output from the model than just the top generated value, are building chains that are agnostic to the underlying language model type (e.g., pure text completion models vs chat models). Parameters prompts – List of PromptValues. A PromptValue is an object that can be converted to match the format of any language model (string for pure text generation models and BaseMessages for chat models). stop – Stop words to use when generating. Model output is cut off at the first occurrence of any of these substrings. callbacks – Callbacks to pass through. Used for executing additional functionality, such as logging or streaming, throughout generation. **kwargs – Arbitrary additional keyword arguments. These are usually passed to the model provider API call. Returns An LLMResult, which contains a list of candidate Generations for each input prompt and additional model provider-specific output. async apredict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶ Asynchronously pass a string to the model and return a string prediction. 
Use this method when calling pure text generation models and only the top candidate generation is needed. Parameters text – String input to pass to the model. stop – Stop words to use when generating. Model output is cut off at the first occurrence of any of these substrings. **kwargs – Arbitrary additional keyword arguments. These are usually passed to the model provider API call. Returns Top model prediction as a string. async apredict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶ Asynchronously pass messages to the model and return a message prediction. Use this method when calling chat models and only the top candidate generation is needed. Parameters messages – A sequence of chat messages corresponding to a single model input. stop – Stop words to use when generating. Model output is cut off at the first occurrence of any of these substrings. **kwargs – Arbitrary additional keyword arguments. These are usually passed to the model provider API call. Returns Top model prediction as a message. validator check_rellm_installation  »  all fields[source]¶ dict(**kwargs: Any) → Dict¶ Return a dictionary of the LLM. classmethod from_model_id(model_id: str, task: str, device: int = -1, model_kwargs: Optional[dict] = None, pipeline_kwargs: Optional[dict] = None, **kwargs: Any) → LLM¶ Construct the pipeline object from model_id and task. generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any) → LLMResult¶ Run the LLM on the given prompt and input. generate_prompt(prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → LLMResult¶ Pass a sequence of prompts to the model and return model generations. This method should make use of batched calls for models that expose a batched API. Use this method when you want to: take advantage of batched calls, need more output from the model than just the top generated value, are building chains that are agnostic to the underlying language model type (e.g., pure text completion models vs chat models). Parameters prompts – List of PromptValues. A PromptValue is an object that can be converted to match the format of any language model (string for pure text generation models and BaseMessages for chat models). stop – Stop words to use when generating. Model output is cut off at the first occurrence of any of these substrings. callbacks – Callbacks to pass through. Used for executing additional functionality, such as logging or streaming, throughout generation. **kwargs – Arbitrary additional keyword arguments. These are usually passed to the model provider API call. Returns An LLMResult, which contains a list of candidate Generations for each input prompt and additional model provider-specific output. get_num_tokens(text: str) → int¶ Get the number of tokens present in the text. Useful for checking if an input will fit in a model’s context window. Parameters text – The string input to tokenize. Returns The integer number of tokens in the text. get_num_tokens_from_messages(messages: List[BaseMessage]) → int¶ Get the number of tokens in the messages. Useful for checking if an input will fit in a model’s context window. Parameters messages – The message inputs to tokenize. 
Returns The sum of the number of tokens across the messages. get_token_ids(text: str) → List[int]¶ Return the ordered ids of the tokens in a text. Parameters text – The string input to tokenize. Returns A list of ids corresponding to the tokens in the text, in order they occur in the text. predict(text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → str¶ Pass a single string input to the model and return a string prediction. Use this method when passing in raw text. If you want to pass in specific types of chat messages, use predict_messages. Parameters text – String input to pass to the model. stop – Stop words to use when generating. Model output is cut off at the first occurrence of any of these substrings. **kwargs – Arbitrary additional keyword arguments. These are usually passed to the model provider API call. Returns Top model prediction as a string. predict_messages(messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None, **kwargs: Any) → BaseMessage¶ Pass a message sequence to the model and return a message prediction. Use this method when passing in chat messages. If you want to pass in raw text, use predict. Parameters messages – A sequence of chat messages corresponding to a single model input. stop – Stop words to use when generating. Model output is cut off at the first occurrence of any of these substrings. **kwargs – Arbitrary additional keyword arguments. These are usually passed to the model provider API call. Returns Top model prediction as a message. validator raise_deprecation  »  all fields¶ Raise deprecation warning if callback_manager is used. save(file_path: Union[Path, str]) → None¶ Save the LLM. Parameters file_path – Path to file to save the LLM to. Example: .. code-block:: python llm.save(file_path=”path/llm.yaml”) validator set_verbose  »  verbose¶ If verbose is None, set it. This allows users to pass in None as verbose to access the global setting. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object Configuration for this pydantic object. extra = 'forbid'¶
Create a new model by parsing and validating input data from keyword arguments.
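A minimal usage sketch, assuming rellm and transformers are installed; gpt2 and the pattern are illustrative:

import regex
from transformers import pipeline

from langchain.experimental.llms import RELLM

hf_pipeline = pipeline("text-generation", model="gpt2")  # small causal LM for illustration

# Constrain generation to a clock time such as "12:30".
pattern = regex.compile(r"\d{1,2}:\d{2}")
llm = RELLM(pipeline=hf_pipeline, regex=pattern, max_new_tokens=10)
print(llm("The meeting starts at "))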
ddf301c2-1b28-435a-ab85-6532d64f18f1
[ "enum.Enum" ]
langchain.experimental.cpal.constants.Constant
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.cpal.constants.Constant.html#langchain.experimental.cpal.constants.Constant
class Constant(Enum):
    narrative_input = "narrative_input"
    chain_answer = "chain_answer"  # natural language answer
    chain_data = "chain_data"  # pydantic instance
langchain.experimental.cpal.constants.Constant¶ class langchain.experimental.cpal.constants.Constant(value, names=None, *, module=None, qualname=None, type=None, start=1, boundary=None)[source]¶ Bases: Enum Attributes narrative_input chain_answer chain_data chain_answer = 'chain_answer'¶ chain_data = 'chain_data'¶ narrative_input = 'narrative_input'¶
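The members are plain string-valued enum entries; per the source comments, chain_answer keys the natural-language answer and chain_data the pydantic instance. A small access sketch (the outputs dict is illustrative):

from langchain.experimental.cpal.constants import Constant

outputs = {Constant.chain_answer.value: "42", Constant.chain_data.value: None}
print(Constant.narrative_input.value)        # -> "narrative_input"
print(outputs[Constant.chain_answer.value])  # -> "42"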
44f94e43-e9df-4bf6-aefc-733327ad3da9
[ "collections.deque", "typing.Any", "typing.Dict", "typing.List", "typing.Optional", "pydantic.BaseModel", "pydantic.Field", "langchain.callbacks.manager.CallbackManagerForChainRun", "langchain.chains.base.Chain", "langchain.experimental.autonomous_agents.baby_agi.task_creation.TaskCreationChain", "langchain.experimental.autonomous_agents.baby_agi.task_execution.TaskExecutionChain", "langchain.experimental.autonomous_agents.baby_agi.task_prioritization.TaskPrioritizationChain", "langchain.schema.language_model.BaseLanguageModel", "langchain.vectorstores.base.VectorStore" ]
langchain.experimental.autonomous_agents.baby_agi.baby_agi.BabyAGI
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.autonomous_agents.baby_agi.baby_agi.BabyAGI.html#langchain.experimental.autonomous_agents.baby_agi.baby_agi.BabyAGI
class BabyAGI(Chain, BaseModel):
    """Controller model for the BabyAGI agent."""

    task_list: deque = Field(default_factory=deque)
    task_creation_chain: Chain = Field(...)
    task_prioritization_chain: Chain = Field(...)
    execution_chain: Chain = Field(...)
    task_id_counter: int = Field(1)
    vectorstore: VectorStore = Field(init=False)
    max_iterations: Optional[int] = None

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    def add_task(self, task: Dict) -> None:
        self.task_list.append(task)

    def print_task_list(self) -> None:
        print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m")
        for t in self.task_list:
            print(str(t["task_id"]) + ": " + t["task_name"])

    def print_next_task(self, task: Dict) -> None:
        print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m")
        print(str(task["task_id"]) + ": " + task["task_name"])

    def print_task_result(self, result: str) -> None:
        print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m")
        print(result)

    @property
    def input_keys(self) -> List[str]:
        return ["objective"]

    @property
    def output_keys(self) -> List[str]:
        return []

    def get_next_task(
        self, result: str, task_description: str, objective: str
    ) -> List[Dict]:
        """Get the next task."""
        task_names = [t["task_name"] for t in self.task_list]
        incomplete_tasks = ", ".join(task_names)
        response = self.task_creation_chain.run(
            result=result,
            task_description=task_description,
            incomplete_tasks=incomplete_tasks,
            objective=objective,
        )
        new_tasks = response.split("\n")
        return [
            {"task_name": task_name} for task_name in new_tasks if task_name.strip()
        ]

    def prioritize_tasks(self, this_task_id: int, objective: str) -> List[Dict]:
        """Prioritize tasks."""
        task_names = [t["task_name"] for t in list(self.task_list)]
        next_task_id = int(this_task_id) + 1
        response = self.task_prioritization_chain.run(
            task_names=", ".join(task_names),
            next_task_id=str(next_task_id),
            objective=objective,
        )
        new_tasks = response.split("\n")
        prioritized_task_list = []
        for task_string in new_tasks:
            if not task_string.strip():
                continue
            task_parts = task_string.strip().split(".", 1)
            if len(task_parts) == 2:
                task_id = task_parts[0].strip()
                task_name = task_parts[1].strip()
                prioritized_task_list.append(
                    {"task_id": task_id, "task_name": task_name}
                )
        return prioritized_task_list

    def _get_top_tasks(self, query: str, k: int) -> List[str]:
        """Get the top k tasks based on the query."""
        results = self.vectorstore.similarity_search(query, k=k)
        if not results:
            return []
        return [str(item.metadata["task"]) for item in results]

    def execute_task(self, objective: str, task: str, k: int = 5) -> str:
        """Execute a task."""
        context = self._get_top_tasks(query=objective, k=k)
        return self.execution_chain.run(
            objective=objective, context="\n".join(context), task=task
        )

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        """Run the agent."""
        objective = inputs["objective"]
        first_task = inputs.get("first_task", "Make a todo list")
        self.add_task({"task_id": 1, "task_name": first_task})
        num_iters = 0
        while True:
            if self.task_list:
                self.print_task_list()

                # Step 1: Pull the first task
                task = self.task_list.popleft()
                self.print_next_task(task)

                # Step 2: Execute the task
                result = self.execute_task(objective, task["task_name"])
                this_task_id = int(task["task_id"])
                self.print_task_result(result)

                # Step 3: Store the result in Pinecone
                result_id = f"result_{task['task_id']}"
                self.vectorstore.add_texts(
                    texts=[result],
                    metadatas=[{"task": task["task_name"]}],
                    ids=[result_id],
                )

                # Step 4: Create new tasks and reprioritize task list
                new_tasks = self.get_next_task(result, task["task_name"], objective)
                for new_task in new_tasks:
                    self.task_id_counter += 1
                    new_task.update({"task_id": self.task_id_counter})
                    self.add_task(new_task)
                self.task_list = deque(self.prioritize_tasks(this_task_id, objective))
            num_iters += 1
            if self.max_iterations is not None and num_iters == self.max_iterations:
                print(
                    "\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m"
                )
                break
        return {}

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        vectorstore: VectorStore,
        verbose: bool = False,
        task_execution_chain: Optional[Chain] = None,
        **kwargs: Dict[str, Any],
    ) -> "BabyAGI":
        """Initialize the BabyAGI Controller."""
        task_creation_chain = TaskCreationChain.from_llm(llm, verbose=verbose)
        task_prioritization_chain = TaskPrioritizationChain.from_llm(
            llm, verbose=verbose
        )
        if task_execution_chain is None:
            execution_chain: Chain = TaskExecutionChain.from_llm(llm, verbose=verbose)
        else:
            execution_chain = task_execution_chain
        return cls(
            task_creation_chain=task_creation_chain,
            task_prioritization_chain=task_prioritization_chain,
            execution_chain=execution_chain,
            vectorstore=vectorstore,
            **kwargs,
        )
langchain.experimental.autonomous_agents.baby_agi.baby_agi.BabyAGI¶ class langchain.experimental.autonomous_agents.baby_agi.baby_agi.BabyAGI(*, memory: Optional[BaseMemory] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, verbose: bool = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, task_list: deque = None, task_creation_chain: Chain, task_prioritization_chain: Chain, execution_chain: Chain, task_id_counter: int = 1, vectorstore: VectorStore, max_iterations: Optional[int] = None)[source]¶ Bases: Chain, BaseModel Controller model for the BabyAGI agent. Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param callback_manager: Optional[BaseCallbackManager] = None¶ Deprecated, use callbacks instead. param callbacks: Callbacks = None¶ Optional list of callback handlers (or callback manager). Defaults to None. Callback handlers are called throughout the lifecycle of a call to a chain, starting with on_chain_start, ending with on_chain_end or on_chain_error. Each custom chain can optionally call additional callback methods, see Callback docs for full details. param execution_chain: langchain.chains.base.Chain [Required]¶ param max_iterations: Optional[int] = None¶ param memory: Optional[BaseMemory] = None¶ Optional memory object. Defaults to None. Memory is a class that gets called at the start and at the end of every chain. At the start, memory loads variables and passes them along in the chain. At the end, it saves any returned variables. There are many different types of memory - please see memory docs for the full catalog. param metadata: Optional[Dict[str, Any]] = None¶ Optional metadata associated with the chain. Defaults to None This metadata will be associated with each call to this chain, and passed as arguments to the handlers defined in callbacks. You can use these to eg identify a specific instance of a chain with its use case. param tags: Optional[List[str]] = None¶ Optional list of tags associated with the chain. Defaults to None These tags will be associated with each call to this chain, and passed as arguments to the handlers defined in callbacks. You can use these to eg identify a specific instance of a chain with its use case. param task_creation_chain: langchain.chains.base.Chain [Required]¶ param task_id_counter: int = 1¶ param task_list: collections.deque [Optional]¶ param task_prioritization_chain: langchain.chains.base.Chain [Required]¶ param vectorstore: langchain.vectorstores.base.VectorStore [Required]¶ param verbose: bool [Optional]¶ Whether or not run in verbose mode. In verbose mode, some intermediate logs will be printed to the console. Defaults to langchain.verbose value. __call__(inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, include_run_info: bool = False) → Dict[str, Any]¶ Execute the chain. Parameters inputs – Dictionary of inputs, or single input if chain expects only one param. Should contain all inputs specified in Chain.input_keys except for inputs that will be set by the chain’s memory. return_only_outputs – Whether to return only outputs in the response. If True, only new keys generated by this chain will be returned. 
If False, both input keys and new keys generated by this chain will be returned. Defaults to False. callbacks – Callbacks to use for this chain run. These will be called in addition to callbacks passed to the chain during construction, but only these runtime callbacks will propagate to calls to other objects. tags – List of string tags to pass to all callbacks. These will be passed in addition to tags passed to the chain during construction, but only these runtime tags will propagate to calls to other objects. metadata – Optional metadata associated with the chain. Defaults to None include_run_info – Whether to include run info in the response. Defaults to False. Returns A dict of named outputs. Should contain all outputs specified in Chain.output_keys. async acall(inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, include_run_info: bool = False) → Dict[str, Any]¶ Asynchronously execute the chain. Parameters inputs – Dictionary of inputs, or single input if chain expects only one param. Should contain all inputs specified in Chain.input_keys except for inputs that will be set by the chain’s memory. return_only_outputs – Whether to return only outputs in the response. If True, only new keys generated by this chain will be returned. If False, both input keys and new keys generated by this chain will be returned. Defaults to False. callbacks – Callbacks to use for this chain run. These will be called in addition to callbacks passed to the chain during construction, but only these runtime callbacks will propagate to calls to other objects. tags – List of string tags to pass to all callbacks. These will be passed in addition to tags passed to the chain during construction, but only these runtime tags will propagate to calls to other objects. metadata – Optional metadata associated with the chain. Defaults to None include_run_info – Whether to include run info in the response. Defaults to False. Returns A dict of named outputs. Should contain all outputs specified in Chain.output_keys. add_task(task: Dict) → None[source]¶ apply(input_list: List[Dict[str, Any]], callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None) → List[Dict[str, str]]¶ Call the chain on all inputs in the list. async arun(*args: Any, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any) → str¶ Convenience method for executing chain when there’s a single string output. The main difference between this method and Chain.__call__ is that this method can only be used for chains that return a single string output. If a Chain has more outputs, a non-string output, or you want to return the inputs/run info along with the outputs, use Chain.__call__. The other difference is that this method expects inputs to be passed directly in as positional arguments or keyword arguments, whereas Chain.__call__ expects a single input dictionary with all the inputs. Parameters *args – If the chain expects a single input, it can be passed in as the sole positional argument. callbacks – Callbacks to use for this chain run. These will be called in addition to callbacks passed to the chain during construction, but only these runtime callbacks will propagate to calls to other objects. 
tags – List of string tags to pass to all callbacks. These will be passed in addition to tags passed to the chain during construction, but only these runtime tags will propagate to calls to other objects. **kwargs – If the chain expects multiple inputs, they can be passed in directly as keyword arguments. Returns The chain output as a string. Example # Suppose we have a single-input chain that takes a 'question' string: await chain.arun("What's the temperature in Boise, Idaho?") # -> "The temperature in Boise is..." # Suppose we have a multi-input chain that takes a 'question' string # and 'context' string: question = "What's the temperature in Boise, Idaho?" context = "Weather report for Boise, Idaho on 07/03/23..." await chain.arun(question=question, context=context) # -> "The temperature in Boise is..." dict(**kwargs: Any) → Dict¶ Return dictionary representation of chain. Expects Chain._chain_type property to be implemented and for memory to be null. Parameters **kwargs – Keyword arguments passed to default pydantic.BaseModel.dict method. Returns A dictionary representation of the chain. Example .. code-block:: python chain.dict(exclude_unset=True) # -> {“_type”: “foo”, “verbose”: False, …} execute_task(objective: str, task: str, k: int = 5) → str[source]¶ Execute a task. classmethod from_llm(llm: BaseLanguageModel, vectorstore: VectorStore, verbose: bool = False, task_execution_chain: Optional[Chain] = None, **kwargs: Dict[str, Any]) → BabyAGI[source]¶ Initialize the BabyAGI Controller. get_next_task(result: str, task_description: str, objective: str) → List[Dict][source]¶ Get the next task. prep_inputs(inputs: Union[Dict[str, Any], Any]) → Dict[str, str]¶ Validate and prepare chain inputs, including adding inputs from memory. Parameters inputs – Dictionary of raw inputs, or single input if chain expects only one param. Should contain all inputs specified in Chain.input_keys except for inputs that will be set by the chain’s memory. Returns A dictionary of all inputs, including those added by the chain’s memory. prep_outputs(inputs: Dict[str, str], outputs: Dict[str, str], return_only_outputs: bool = False) → Dict[str, str]¶ Validate and prepare chain outputs, and save info about this run to memory. Parameters inputs – Dictionary of chain inputs, including any inputs added by chain memory. outputs – Dictionary of initial chain outputs. return_only_outputs – Whether to only return the chain outputs. If False, inputs are also added to the final outputs. Returns A dict of the final chain outputs. print_next_task(task: Dict) → None[source]¶ print_task_list() → None[source]¶ print_task_result(result: str) → None[source]¶ prioritize_tasks(this_task_id: int, objective: str) → List[Dict][source]¶ Prioritize tasks. validator raise_callback_manager_deprecation  »  all fields¶ Raise deprecation warning if callback_manager is used. run(*args: Any, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any) → str¶ Convenience method for executing chain when there’s a single string output. The main difference between this method and Chain.__call__ is that this method can only be used for chains that return a single string output. If a Chain has more outputs, a non-string output, or you want to return the inputs/run info along with the outputs, use Chain.__call__. 
The other difference is that this method expects inputs to be passed directly in as positional arguments or keyword arguments, whereas Chain.__call__ expects a single input dictionary with all the inputs. Parameters *args – If the chain expects a single input, it can be passed in as the sole positional argument. callbacks – Callbacks to use for this chain run. These will be called in addition to callbacks passed to the chain during construction, but only these runtime callbacks will propagate to calls to other objects. tags – List of string tags to pass to all callbacks. These will be passed in addition to tags passed to the chain during construction, but only these runtime tags will propagate to calls to other objects. **kwargs – If the chain expects multiple inputs, they can be passed in directly as keyword arguments. Returns The chain output as a string. Example # Suppose we have a single-input chain that takes a 'question' string: chain.run("What's the temperature in Boise, Idaho?") # -> "The temperature in Boise is..." # Suppose we have a multi-input chain that takes a 'question' string # and 'context' string: question = "What's the temperature in Boise, Idaho?" context = "Weather report for Boise, Idaho on 07/03/23..." chain.run(question=question, context=context) # -> "The temperature in Boise is..." save(file_path: Union[Path, str]) → None¶ Save the chain. Expects Chain._chain_type property to be implemented and for memory to be null. Parameters file_path – Path to file to save the chain to. Example chain.save(file_path="path/chain.yaml") validator set_verbose  »  verbose¶ Set the chain verbosity. Defaults to the global setting if not specified by the user. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property input_keys: List[str]¶ Return the keys expected to be in the chain input. property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. property output_keys: List[str]¶ Return the keys expected to be in the chain output. model Config[source]¶ Bases: object Configuration for this pydantic object. arbitrary_types_allowed = True¶
Controller model for the BabyAGI agent.
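A minimal usage sketch for the controller above, following the common BabyAGI notebook pattern. The OpenAI LLM, OpenAIEmbeddings, and FAISS index are illustrative choices rather than requirements of the class, and the objective string and max_iterations value are assumptions.

import faiss  # assumed available; any VectorStore implementation works here

from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI
from langchain.llms import OpenAI
from langchain.vectorstores import FAISS

# Empty vector store used to remember the results of executed tasks.
embeddings = OpenAIEmbeddings()
index = faiss.IndexFlatL2(1536)  # 1536 = OpenAI embedding dimension
vectorstore = FAISS(embeddings.embed_query, index, InMemoryDocstore({}), {})

baby_agi = BabyAGI.from_llm(
    llm=OpenAI(temperature=0),
    vectorstore=vectorstore,
    verbose=False,
    max_iterations=3,  # stop after three task loops instead of running indefinitely
)
baby_agi({"objective": "Write a weather report for SF today"})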
61c710f0-8f2d-409a-b2be-4378996d7bfc
[ "langchain.LLMChain", "langchain.PromptTemplate", "langchain.schema.language_model.BaseLanguageModel" ]
langchain.experimental.autonomous_agents.baby_agi.task_prioritization.TaskPrioritizationChain
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.autonomous_agents.baby_agi.task_prioritization.TaskPrioritizationChain.html#langchain.experimental.autonomous_agents.baby_agi.task_prioritization.TaskPrioritizationChain
class TaskPrioritizationChain(LLMChain): """Chain to prioritize tasks.""" @classmethod def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain: """Get the response parser.""" task_prioritization_template = ( "You are a task prioritization AI tasked with cleaning the formatting of " "and reprioritizing the following tasks: {task_names}." " Consider the ultimate objective of your team: {objective}." " Do not remove any tasks. Return the result as a numbered list, like:" " #. First task" " #. Second task" " Start the task list with number {next_task_id}." ) prompt = PromptTemplate( template=task_prioritization_template, input_variables=["task_names", "next_task_id", "objective"], ) return cls(prompt=prompt, llm=llm, verbose=verbose)
langchain.experimental.autonomous_agents.baby_agi.task_prioritization.TaskPrioritizationChain¶ class langchain.experimental.autonomous_agents.baby_agi.task_prioritization.TaskPrioritizationChain(*, memory: Optional[BaseMemory] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, verbose: bool = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, prompt: BasePromptTemplate, llm: BaseLanguageModel, output_key: str = 'text', output_parser: BaseLLMOutputParser = None, return_final_only: bool = True, llm_kwargs: dict = None)[source]¶ Bases: LLMChain Chain to prioritize tasks. Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param callback_manager: Optional[BaseCallbackManager] = None¶ Deprecated, use callbacks instead. param callbacks: Callbacks = None¶ Optional list of callback handlers (or callback manager). Defaults to None. Callback handlers are called throughout the lifecycle of a call to a chain, starting with on_chain_start, ending with on_chain_end or on_chain_error. Each custom chain can optionally call additional callback methods, see Callback docs for full details. param llm: BaseLanguageModel [Required]¶ Language model to call. param llm_kwargs: dict [Optional]¶ param memory: Optional[BaseMemory] = None¶ Optional memory object. Defaults to None. Memory is a class that gets called at the start and at the end of every chain. At the start, memory loads variables and passes them along in the chain. At the end, it saves any returned variables. There are many different types of memory - please see memory docs for the full catalog. param metadata: Optional[Dict[str, Any]] = None¶ Optional metadata associated with the chain. Defaults to None This metadata will be associated with each call to this chain, and passed as arguments to the handlers defined in callbacks. You can use these to eg identify a specific instance of a chain with its use case. param output_key: str = 'text'¶ param output_parser: BaseLLMOutputParser [Optional]¶ Output parser to use. Defaults to one that takes the most likely string but does not change it otherwise. param prompt: BasePromptTemplate [Required]¶ Prompt object to use. param return_final_only: bool = True¶ Whether to return only the final parsed result. Defaults to True. If false, will return a bunch of extra information about the generation. param tags: Optional[List[str]] = None¶ Optional list of tags associated with the chain. Defaults to None These tags will be associated with each call to this chain, and passed as arguments to the handlers defined in callbacks. You can use these to eg identify a specific instance of a chain with its use case. param verbose: bool [Optional]¶ Whether or not run in verbose mode. In verbose mode, some intermediate logs will be printed to the console. Defaults to langchain.verbose value. __call__(inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, include_run_info: bool = False) → Dict[str, Any]¶ Execute the chain. Parameters inputs – Dictionary of inputs, or single input if chain expects only one param. Should contain all inputs specified in Chain.input_keys except for inputs that will be set by the chain’s memory. 
return_only_outputs – Whether to return only outputs in the response. If True, only new keys generated by this chain will be returned. If False, both input keys and new keys generated by this chain will be returned. Defaults to False. callbacks – Callbacks to use for this chain run. These will be called in addition to callbacks passed to the chain during construction, but only these runtime callbacks will propagate to calls to other objects. tags – List of string tags to pass to all callbacks. These will be passed in addition to tags passed to the chain during construction, but only these runtime tags will propagate to calls to other objects. metadata – Optional metadata associated with the chain. Defaults to None include_run_info – Whether to include run info in the response. Defaults to False. Returns A dict of named outputs. Should contain all outputs specified inChain.output_keys. async aapply(input_list: List[Dict[str, Any]], callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None) → List[Dict[str, str]]¶ Utilize the LLM generate method for speed gains. async aapply_and_parse(input_list: List[Dict[str, Any]], callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None) → Sequence[Union[str, List[str], Dict[str, str]]]¶ Call apply and then parse the results. async acall(inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, include_run_info: bool = False) → Dict[str, Any]¶ Asynchronously execute the chain. Parameters inputs – Dictionary of inputs, or single input if chain expects only one param. Should contain all inputs specified in Chain.input_keys except for inputs that will be set by the chain’s memory. return_only_outputs – Whether to return only outputs in the response. If True, only new keys generated by this chain will be returned. If False, both input keys and new keys generated by this chain will be returned. Defaults to False. callbacks – Callbacks to use for this chain run. These will be called in addition to callbacks passed to the chain during construction, but only these runtime callbacks will propagate to calls to other objects. tags – List of string tags to pass to all callbacks. These will be passed in addition to tags passed to the chain during construction, but only these runtime tags will propagate to calls to other objects. metadata – Optional metadata associated with the chain. Defaults to None include_run_info – Whether to include run info in the response. Defaults to False. Returns A dict of named outputs. Should contain all outputs specified inChain.output_keys. async agenerate(input_list: List[Dict[str, Any]], run_manager: Optional[AsyncCallbackManagerForChainRun] = None) → LLMResult¶ Generate LLM result from inputs. apply(input_list: List[Dict[str, Any]], callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None) → List[Dict[str, str]]¶ Utilize the LLM generate method for speed gains. apply_and_parse(input_list: List[Dict[str, Any]], callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None) → Sequence[Union[str, List[str], Dict[str, str]]]¶ Call apply and then parse the results. async apredict(callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → str¶ Format prompt with kwargs and pass to LLM. 
Parameters callbacks – Callbacks to pass to LLMChain **kwargs – Keys to pass to prompt template. Returns Completion from LLM. Example completion = llm.predict(adjective="funny") async apredict_and_parse(callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → Union[str, List[str], Dict[str, str]]¶ Call apredict and then parse the results. async aprep_prompts(input_list: List[Dict[str, Any]], run_manager: Optional[AsyncCallbackManagerForChainRun] = None) → Tuple[List[PromptValue], Optional[List[str]]]¶ Prepare prompts from inputs. async arun(*args: Any, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any) → str¶ Convenience method for executing chain when there’s a single string output. The main difference between this method and Chain.__call__ is that this methodcan only be used for chains that return a single string output. If a Chain has more outputs, a non-string output, or you want to return the inputs/run info along with the outputs, use Chain.__call__. The other difference is that this method expects inputs to be passed directly in as positional arguments or keyword arguments, whereas Chain.__call__ expects a single input dictionary with all the inputs. Parameters *args – If the chain expects a single input, it can be passed in as the sole positional argument. callbacks – Callbacks to use for this chain run. These will be called in addition to callbacks passed to the chain during construction, but only these runtime callbacks will propagate to calls to other objects. tags – List of string tags to pass to all callbacks. These will be passed in addition to tags passed to the chain during construction, but only these runtime tags will propagate to calls to other objects. **kwargs – If the chain expects multiple inputs, they can be passed in directly as keyword arguments. Returns The chain output as a string. Example # Suppose we have a single-input chain that takes a 'question' string: await chain.arun("What's the temperature in Boise, Idaho?") # -> "The temperature in Boise is..." # Suppose we have a multi-input chain that takes a 'question' string # and 'context' string: question = "What's the temperature in Boise, Idaho?" context = "Weather report for Boise, Idaho on 07/03/23..." await chain.arun(question=question, context=context) # -> "The temperature in Boise is..." create_outputs(llm_result: LLMResult) → List[Dict[str, Any]]¶ Create outputs from response. dict(**kwargs: Any) → Dict¶ Return dictionary representation of chain. Expects Chain._chain_type property to be implemented and for memory to benull. Parameters **kwargs – Keyword arguments passed to default pydantic.BaseModel.dict method. Returns A dictionary representation of the chain. Example ..code-block:: python chain.dict(exclude_unset=True) # -> {“_type”: “foo”, “verbose”: False, …} classmethod from_llm(llm: BaseLanguageModel, verbose: bool = True) → LLMChain[source]¶ Get the response parser. classmethod from_string(llm: BaseLanguageModel, template: str) → LLMChain¶ Create LLMChain from LLM and template. generate(input_list: List[Dict[str, Any]], run_manager: Optional[CallbackManagerForChainRun] = None) → LLMResult¶ Generate LLM result from inputs. predict(callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → str¶ Format prompt with kwargs and pass to LLM. 
Parameters callbacks – Callbacks to pass to LLMChain **kwargs – Keys to pass to prompt template. Returns Completion from LLM. Example completion = llm.predict(adjective="funny") predict_and_parse(callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → Union[str, List[str], Dict[str, Any]]¶ Call predict and then parse the results. prep_inputs(inputs: Union[Dict[str, Any], Any]) → Dict[str, str]¶ Validate and prepare chain inputs, including adding inputs from memory. Parameters inputs – Dictionary of raw inputs, or single input if chain expects only one param. Should contain all inputs specified in Chain.input_keys except for inputs that will be set by the chain’s memory. Returns A dictionary of all inputs, including those added by the chain’s memory. prep_outputs(inputs: Dict[str, str], outputs: Dict[str, str], return_only_outputs: bool = False) → Dict[str, str]¶ Validate and prepare chain outputs, and save info about this run to memory. Parameters inputs – Dictionary of chain inputs, including any inputs added by chain memory. outputs – Dictionary of initial chain outputs. return_only_outputs – Whether to only return the chain outputs. If False, inputs are also added to the final outputs. Returns A dict of the final chain outputs. prep_prompts(input_list: List[Dict[str, Any]], run_manager: Optional[CallbackManagerForChainRun] = None) → Tuple[List[PromptValue], Optional[List[str]]]¶ Prepare prompts from inputs. validator raise_callback_manager_deprecation  »  all fields¶ Raise deprecation warning if callback_manager is used. run(*args: Any, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any) → str¶ Convenience method for executing chain when there’s a single string output. The main difference between this method and Chain.__call__ is that this methodcan only be used for chains that return a single string output. If a Chain has more outputs, a non-string output, or you want to return the inputs/run info along with the outputs, use Chain.__call__. The other difference is that this method expects inputs to be passed directly in as positional arguments or keyword arguments, whereas Chain.__call__ expects a single input dictionary with all the inputs. Parameters *args – If the chain expects a single input, it can be passed in as the sole positional argument. callbacks – Callbacks to use for this chain run. These will be called in addition to callbacks passed to the chain during construction, but only these runtime callbacks will propagate to calls to other objects. tags – List of string tags to pass to all callbacks. These will be passed in addition to tags passed to the chain during construction, but only these runtime tags will propagate to calls to other objects. **kwargs – If the chain expects multiple inputs, they can be passed in directly as keyword arguments. Returns The chain output as a string. Example # Suppose we have a single-input chain that takes a 'question' string: chain.run("What's the temperature in Boise, Idaho?") # -> "The temperature in Boise is..." # Suppose we have a multi-input chain that takes a 'question' string # and 'context' string: question = "What's the temperature in Boise, Idaho?" context = "Weather report for Boise, Idaho on 07/03/23..." chain.run(question=question, context=context) # -> "The temperature in Boise is..." save(file_path: Union[Path, str]) → None¶ Save the chain. 
Expects Chain._chain_type property to be implemented and for memory to benull. Parameters file_path – Path to file to save the chain to. Example chain.save(file_path="path/chain.yaml") validator set_verbose  »  verbose¶ Set the chain verbosity. Defaults to the global setting if not specified by the user. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object Configuration for this pydantic object. arbitrary_types_allowed = True¶ extra = 'forbid'¶
Chain to prioritize tasks.
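A hedged usage sketch for the chain above; the OpenAI LLM and the sample task list are assumptions for illustration. The three keyword arguments map directly onto the prompt's input_variables.

from langchain.llms import OpenAI  # illustrative LLM choice

prioritization_chain = TaskPrioritizationChain.from_llm(OpenAI(temperature=0), verbose=False)
reprioritized = prioritization_chain.run(
    task_names="2. Draft outline, 3. Collect sources",
    next_task_id="2",
    objective="Write a blog post about solar energy",
)
# -> raw LLM text containing a renumbered task list starting at #2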
3846202b-b308-4b51-988d-1e26fb0e9c8e
[ "langchain.LLMChain", "langchain.PromptTemplate", "langchain.schema.language_model.BaseLanguageModel" ]
langchain.experimental.autonomous_agents.baby_agi.task_execution.TaskExecutionChain
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.autonomous_agents.baby_agi.task_execution.TaskExecutionChain.html#langchain.experimental.autonomous_agents.baby_agi.task_execution.TaskExecutionChain
class TaskExecutionChain(LLMChain):
    """Chain to execute tasks."""

    @classmethod
    def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
        """Get the response parser."""
        execution_template = (
            "You are an AI who performs one task based on the following objective: "
            "{objective}."
            " Take into account these previously completed tasks: {context}."
            " Your task: {task}. Response:"
        )
        prompt = PromptTemplate(
            template=execution_template,
            input_variables=["objective", "context", "task"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)
langchain.experimental.autonomous_agents.baby_agi.task_execution.TaskExecutionChain¶ class langchain.experimental.autonomous_agents.baby_agi.task_execution.TaskExecutionChain(*, memory: Optional[BaseMemory] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, verbose: bool = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, prompt: BasePromptTemplate, llm: BaseLanguageModel, output_key: str = 'text', output_parser: BaseLLMOutputParser = None, return_final_only: bool = True, llm_kwargs: dict = None)[source]¶ Bases: LLMChain Chain to execute tasks. Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param callback_manager: Optional[BaseCallbackManager] = None¶ Deprecated, use callbacks instead. param callbacks: Callbacks = None¶ Optional list of callback handlers (or callback manager). Defaults to None. Callback handlers are called throughout the lifecycle of a call to a chain, starting with on_chain_start, ending with on_chain_end or on_chain_error. Each custom chain can optionally call additional callback methods, see Callback docs for full details. param llm: BaseLanguageModel [Required]¶ Language model to call. param llm_kwargs: dict [Optional]¶ param memory: Optional[BaseMemory] = None¶ Optional memory object. Defaults to None. Memory is a class that gets called at the start and at the end of every chain. At the start, memory loads variables and passes them along in the chain. At the end, it saves any returned variables. There are many different types of memory - please see memory docs for the full catalog. param metadata: Optional[Dict[str, Any]] = None¶ Optional metadata associated with the chain. Defaults to None This metadata will be associated with each call to this chain, and passed as arguments to the handlers defined in callbacks. You can use these to eg identify a specific instance of a chain with its use case. param output_key: str = 'text'¶ param output_parser: BaseLLMOutputParser [Optional]¶ Output parser to use. Defaults to one that takes the most likely string but does not change it otherwise. param prompt: BasePromptTemplate [Required]¶ Prompt object to use. param return_final_only: bool = True¶ Whether to return only the final parsed result. Defaults to True. If false, will return a bunch of extra information about the generation. param tags: Optional[List[str]] = None¶ Optional list of tags associated with the chain. Defaults to None These tags will be associated with each call to this chain, and passed as arguments to the handlers defined in callbacks. You can use these to eg identify a specific instance of a chain with its use case. param verbose: bool [Optional]¶ Whether or not run in verbose mode. In verbose mode, some intermediate logs will be printed to the console. Defaults to langchain.verbose value. __call__(inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, include_run_info: bool = False) → Dict[str, Any]¶ Execute the chain. Parameters inputs – Dictionary of inputs, or single input if chain expects only one param. Should contain all inputs specified in Chain.input_keys except for inputs that will be set by the chain’s memory. 
return_only_outputs – Whether to return only outputs in the response. If True, only new keys generated by this chain will be returned. If False, both input keys and new keys generated by this chain will be returned. Defaults to False. callbacks – Callbacks to use for this chain run. These will be called in addition to callbacks passed to the chain during construction, but only these runtime callbacks will propagate to calls to other objects. tags – List of string tags to pass to all callbacks. These will be passed in addition to tags passed to the chain during construction, but only these runtime tags will propagate to calls to other objects. metadata – Optional metadata associated with the chain. Defaults to None include_run_info – Whether to include run info in the response. Defaults to False. Returns A dict of named outputs. Should contain all outputs specified inChain.output_keys. async aapply(input_list: List[Dict[str, Any]], callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None) → List[Dict[str, str]]¶ Utilize the LLM generate method for speed gains. async aapply_and_parse(input_list: List[Dict[str, Any]], callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None) → Sequence[Union[str, List[str], Dict[str, str]]]¶ Call apply and then parse the results. async acall(inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, include_run_info: bool = False) → Dict[str, Any]¶ Asynchronously execute the chain. Parameters inputs – Dictionary of inputs, or single input if chain expects only one param. Should contain all inputs specified in Chain.input_keys except for inputs that will be set by the chain’s memory. return_only_outputs – Whether to return only outputs in the response. If True, only new keys generated by this chain will be returned. If False, both input keys and new keys generated by this chain will be returned. Defaults to False. callbacks – Callbacks to use for this chain run. These will be called in addition to callbacks passed to the chain during construction, but only these runtime callbacks will propagate to calls to other objects. tags – List of string tags to pass to all callbacks. These will be passed in addition to tags passed to the chain during construction, but only these runtime tags will propagate to calls to other objects. metadata – Optional metadata associated with the chain. Defaults to None include_run_info – Whether to include run info in the response. Defaults to False. Returns A dict of named outputs. Should contain all outputs specified inChain.output_keys. async agenerate(input_list: List[Dict[str, Any]], run_manager: Optional[AsyncCallbackManagerForChainRun] = None) → LLMResult¶ Generate LLM result from inputs. apply(input_list: List[Dict[str, Any]], callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None) → List[Dict[str, str]]¶ Utilize the LLM generate method for speed gains. apply_and_parse(input_list: List[Dict[str, Any]], callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None) → Sequence[Union[str, List[str], Dict[str, str]]]¶ Call apply and then parse the results. async apredict(callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → str¶ Format prompt with kwargs and pass to LLM. 
Parameters callbacks – Callbacks to pass to LLMChain **kwargs – Keys to pass to prompt template. Returns Completion from LLM. Example completion = llm.predict(adjective="funny") async apredict_and_parse(callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → Union[str, List[str], Dict[str, str]]¶ Call apredict and then parse the results. async aprep_prompts(input_list: List[Dict[str, Any]], run_manager: Optional[AsyncCallbackManagerForChainRun] = None) → Tuple[List[PromptValue], Optional[List[str]]]¶ Prepare prompts from inputs. async arun(*args: Any, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any) → str¶ Convenience method for executing chain when there’s a single string output. The main difference between this method and Chain.__call__ is that this methodcan only be used for chains that return a single string output. If a Chain has more outputs, a non-string output, or you want to return the inputs/run info along with the outputs, use Chain.__call__. The other difference is that this method expects inputs to be passed directly in as positional arguments or keyword arguments, whereas Chain.__call__ expects a single input dictionary with all the inputs. Parameters *args – If the chain expects a single input, it can be passed in as the sole positional argument. callbacks – Callbacks to use for this chain run. These will be called in addition to callbacks passed to the chain during construction, but only these runtime callbacks will propagate to calls to other objects. tags – List of string tags to pass to all callbacks. These will be passed in addition to tags passed to the chain during construction, but only these runtime tags will propagate to calls to other objects. **kwargs – If the chain expects multiple inputs, they can be passed in directly as keyword arguments. Returns The chain output as a string. Example # Suppose we have a single-input chain that takes a 'question' string: await chain.arun("What's the temperature in Boise, Idaho?") # -> "The temperature in Boise is..." # Suppose we have a multi-input chain that takes a 'question' string # and 'context' string: question = "What's the temperature in Boise, Idaho?" context = "Weather report for Boise, Idaho on 07/03/23..." await chain.arun(question=question, context=context) # -> "The temperature in Boise is..." create_outputs(llm_result: LLMResult) → List[Dict[str, Any]]¶ Create outputs from response. dict(**kwargs: Any) → Dict¶ Return dictionary representation of chain. Expects Chain._chain_type property to be implemented and for memory to benull. Parameters **kwargs – Keyword arguments passed to default pydantic.BaseModel.dict method. Returns A dictionary representation of the chain. Example ..code-block:: python chain.dict(exclude_unset=True) # -> {“_type”: “foo”, “verbose”: False, …} classmethod from_llm(llm: BaseLanguageModel, verbose: bool = True) → LLMChain[source]¶ Get the response parser. classmethod from_string(llm: BaseLanguageModel, template: str) → LLMChain¶ Create LLMChain from LLM and template. generate(input_list: List[Dict[str, Any]], run_manager: Optional[CallbackManagerForChainRun] = None) → LLMResult¶ Generate LLM result from inputs. predict(callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → str¶ Format prompt with kwargs and pass to LLM. 
Parameters callbacks – Callbacks to pass to LLMChain **kwargs – Keys to pass to prompt template. Returns Completion from LLM. Example completion = llm.predict(adjective="funny") predict_and_parse(callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → Union[str, List[str], Dict[str, Any]]¶ Call predict and then parse the results. prep_inputs(inputs: Union[Dict[str, Any], Any]) → Dict[str, str]¶ Validate and prepare chain inputs, including adding inputs from memory. Parameters inputs – Dictionary of raw inputs, or single input if chain expects only one param. Should contain all inputs specified in Chain.input_keys except for inputs that will be set by the chain’s memory. Returns A dictionary of all inputs, including those added by the chain’s memory. prep_outputs(inputs: Dict[str, str], outputs: Dict[str, str], return_only_outputs: bool = False) → Dict[str, str]¶ Validate and prepare chain outputs, and save info about this run to memory. Parameters inputs – Dictionary of chain inputs, including any inputs added by chain memory. outputs – Dictionary of initial chain outputs. return_only_outputs – Whether to only return the chain outputs. If False, inputs are also added to the final outputs. Returns A dict of the final chain outputs. prep_prompts(input_list: List[Dict[str, Any]], run_manager: Optional[CallbackManagerForChainRun] = None) → Tuple[List[PromptValue], Optional[List[str]]]¶ Prepare prompts from inputs. validator raise_callback_manager_deprecation  »  all fields¶ Raise deprecation warning if callback_manager is used. run(*args: Any, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any) → str¶ Convenience method for executing chain when there’s a single string output. The main difference between this method and Chain.__call__ is that this methodcan only be used for chains that return a single string output. If a Chain has more outputs, a non-string output, or you want to return the inputs/run info along with the outputs, use Chain.__call__. The other difference is that this method expects inputs to be passed directly in as positional arguments or keyword arguments, whereas Chain.__call__ expects a single input dictionary with all the inputs. Parameters *args – If the chain expects a single input, it can be passed in as the sole positional argument. callbacks – Callbacks to use for this chain run. These will be called in addition to callbacks passed to the chain during construction, but only these runtime callbacks will propagate to calls to other objects. tags – List of string tags to pass to all callbacks. These will be passed in addition to tags passed to the chain during construction, but only these runtime tags will propagate to calls to other objects. **kwargs – If the chain expects multiple inputs, they can be passed in directly as keyword arguments. Returns The chain output as a string. Example # Suppose we have a single-input chain that takes a 'question' string: chain.run("What's the temperature in Boise, Idaho?") # -> "The temperature in Boise is..." # Suppose we have a multi-input chain that takes a 'question' string # and 'context' string: question = "What's the temperature in Boise, Idaho?" context = "Weather report for Boise, Idaho on 07/03/23..." chain.run(question=question, context=context) # -> "The temperature in Boise is..." save(file_path: Union[Path, str]) → None¶ Save the chain. 
Expects Chain._chain_type property to be implemented and for memory to benull. Parameters file_path – Path to file to save the chain to. Example chain.save(file_path="path/chain.yaml") validator set_verbose  »  verbose¶ Set the chain verbosity. Defaults to the global setting if not specified by the user. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object Configuration for this pydantic object. arbitrary_types_allowed = True¶ extra = 'forbid'¶
Chain to execute tasks.
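A hedged usage sketch for the execution chain; the OpenAI LLM, objective, context, and task strings below are illustrative values, not part of the API.

from langchain.llms import OpenAI  # illustrative LLM choice

execution_chain = TaskExecutionChain.from_llm(OpenAI(temperature=0), verbose=False)
result = execution_chain.run(
    objective="Write a blog post about solar energy",
    context="1. Collect sources: done",
    task="Draft an outline",
)
# -> the LLM's free-form response to the single task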
61063910-3b6f-4413-b096-3af8c2234beb
[ "langchain.LLMChain", "langchain.PromptTemplate", "langchain.schema.language_model.BaseLanguageModel" ]
langchain.experimental.autonomous_agents.baby_agi.task_creation.TaskCreationChain
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.autonomous_agents.baby_agi.task_creation.TaskCreationChain.html#langchain.experimental.autonomous_agents.baby_agi.task_creation.TaskCreationChain
class TaskCreationChain(LLMChain):
    """Chain to generate tasks."""

    @classmethod
    def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
        """Get the response parser."""
        task_creation_template = (
            "You are a task creation AI that uses the result of an execution agent"
            " to create new tasks with the following objective: {objective}."
            " The last completed task has the result: {result}."
            " This result was based on this task description: {task_description}."
            " These are incomplete tasks: {incomplete_tasks}."
            " Based on the result, create new tasks to be completed"
            " by the AI system that do not overlap with incomplete tasks."
            " Return the tasks as an array."
        )
        prompt = PromptTemplate(
            template=task_creation_template,
            input_variables=[
                "result",
                "task_description",
                "incomplete_tasks",
                "objective",
            ],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)
langchain.experimental.autonomous_agents.baby_agi.task_creation.TaskCreationChain¶ class langchain.experimental.autonomous_agents.baby_agi.task_creation.TaskCreationChain(*, memory: Optional[BaseMemory] = None, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, callback_manager: Optional[BaseCallbackManager] = None, verbose: bool = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, prompt: BasePromptTemplate, llm: BaseLanguageModel, output_key: str = 'text', output_parser: BaseLLMOutputParser = None, return_final_only: bool = True, llm_kwargs: dict = None)[source]¶ Bases: LLMChain Chain to generates tasks. Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param callback_manager: Optional[BaseCallbackManager] = None¶ Deprecated, use callbacks instead. param callbacks: Callbacks = None¶ Optional list of callback handlers (or callback manager). Defaults to None. Callback handlers are called throughout the lifecycle of a call to a chain, starting with on_chain_start, ending with on_chain_end or on_chain_error. Each custom chain can optionally call additional callback methods, see Callback docs for full details. param llm: BaseLanguageModel [Required]¶ Language model to call. param llm_kwargs: dict [Optional]¶ param memory: Optional[BaseMemory] = None¶ Optional memory object. Defaults to None. Memory is a class that gets called at the start and at the end of every chain. At the start, memory loads variables and passes them along in the chain. At the end, it saves any returned variables. There are many different types of memory - please see memory docs for the full catalog. param metadata: Optional[Dict[str, Any]] = None¶ Optional metadata associated with the chain. Defaults to None This metadata will be associated with each call to this chain, and passed as arguments to the handlers defined in callbacks. You can use these to eg identify a specific instance of a chain with its use case. param output_key: str = 'text'¶ param output_parser: BaseLLMOutputParser [Optional]¶ Output parser to use. Defaults to one that takes the most likely string but does not change it otherwise. param prompt: BasePromptTemplate [Required]¶ Prompt object to use. param return_final_only: bool = True¶ Whether to return only the final parsed result. Defaults to True. If false, will return a bunch of extra information about the generation. param tags: Optional[List[str]] = None¶ Optional list of tags associated with the chain. Defaults to None These tags will be associated with each call to this chain, and passed as arguments to the handlers defined in callbacks. You can use these to eg identify a specific instance of a chain with its use case. param verbose: bool [Optional]¶ Whether or not run in verbose mode. In verbose mode, some intermediate logs will be printed to the console. Defaults to langchain.verbose value. __call__(inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, include_run_info: bool = False) → Dict[str, Any]¶ Execute the chain. Parameters inputs – Dictionary of inputs, or single input if chain expects only one param. Should contain all inputs specified in Chain.input_keys except for inputs that will be set by the chain’s memory. 
return_only_outputs – Whether to return only outputs in the response. If True, only new keys generated by this chain will be returned. If False, both input keys and new keys generated by this chain will be returned. Defaults to False. callbacks – Callbacks to use for this chain run. These will be called in addition to callbacks passed to the chain during construction, but only these runtime callbacks will propagate to calls to other objects. tags – List of string tags to pass to all callbacks. These will be passed in addition to tags passed to the chain during construction, but only these runtime tags will propagate to calls to other objects. metadata – Optional metadata associated with the chain. Defaults to None include_run_info – Whether to include run info in the response. Defaults to False. Returns A dict of named outputs. Should contain all outputs specified inChain.output_keys. async aapply(input_list: List[Dict[str, Any]], callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None) → List[Dict[str, str]]¶ Utilize the LLM generate method for speed gains. async aapply_and_parse(input_list: List[Dict[str, Any]], callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None) → Sequence[Union[str, List[str], Dict[str, str]]]¶ Call apply and then parse the results. async acall(inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, *, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, include_run_info: bool = False) → Dict[str, Any]¶ Asynchronously execute the chain. Parameters inputs – Dictionary of inputs, or single input if chain expects only one param. Should contain all inputs specified in Chain.input_keys except for inputs that will be set by the chain’s memory. return_only_outputs – Whether to return only outputs in the response. If True, only new keys generated by this chain will be returned. If False, both input keys and new keys generated by this chain will be returned. Defaults to False. callbacks – Callbacks to use for this chain run. These will be called in addition to callbacks passed to the chain during construction, but only these runtime callbacks will propagate to calls to other objects. tags – List of string tags to pass to all callbacks. These will be passed in addition to tags passed to the chain during construction, but only these runtime tags will propagate to calls to other objects. metadata – Optional metadata associated with the chain. Defaults to None include_run_info – Whether to include run info in the response. Defaults to False. Returns A dict of named outputs. Should contain all outputs specified inChain.output_keys. async agenerate(input_list: List[Dict[str, Any]], run_manager: Optional[AsyncCallbackManagerForChainRun] = None) → LLMResult¶ Generate LLM result from inputs. apply(input_list: List[Dict[str, Any]], callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None) → List[Dict[str, str]]¶ Utilize the LLM generate method for speed gains. apply_and_parse(input_list: List[Dict[str, Any]], callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None) → Sequence[Union[str, List[str], Dict[str, str]]]¶ Call apply and then parse the results. async apredict(callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → str¶ Format prompt with kwargs and pass to LLM. 
Parameters callbacks – Callbacks to pass to LLMChain **kwargs – Keys to pass to prompt template. Returns Completion from LLM. Example completion = llm.predict(adjective="funny") async apredict_and_parse(callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → Union[str, List[str], Dict[str, str]]¶ Call apredict and then parse the results. async aprep_prompts(input_list: List[Dict[str, Any]], run_manager: Optional[AsyncCallbackManagerForChainRun] = None) → Tuple[List[PromptValue], Optional[List[str]]]¶ Prepare prompts from inputs. async arun(*args: Any, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any) → str¶ Convenience method for executing chain when there’s a single string output. The main difference between this method and Chain.__call__ is that this methodcan only be used for chains that return a single string output. If a Chain has more outputs, a non-string output, or you want to return the inputs/run info along with the outputs, use Chain.__call__. The other difference is that this method expects inputs to be passed directly in as positional arguments or keyword arguments, whereas Chain.__call__ expects a single input dictionary with all the inputs. Parameters *args – If the chain expects a single input, it can be passed in as the sole positional argument. callbacks – Callbacks to use for this chain run. These will be called in addition to callbacks passed to the chain during construction, but only these runtime callbacks will propagate to calls to other objects. tags – List of string tags to pass to all callbacks. These will be passed in addition to tags passed to the chain during construction, but only these runtime tags will propagate to calls to other objects. **kwargs – If the chain expects multiple inputs, they can be passed in directly as keyword arguments. Returns The chain output as a string. Example # Suppose we have a single-input chain that takes a 'question' string: await chain.arun("What's the temperature in Boise, Idaho?") # -> "The temperature in Boise is..." # Suppose we have a multi-input chain that takes a 'question' string # and 'context' string: question = "What's the temperature in Boise, Idaho?" context = "Weather report for Boise, Idaho on 07/03/23..." await chain.arun(question=question, context=context) # -> "The temperature in Boise is..." create_outputs(llm_result: LLMResult) → List[Dict[str, Any]]¶ Create outputs from response. dict(**kwargs: Any) → Dict¶ Return dictionary representation of chain. Expects Chain._chain_type property to be implemented and for memory to benull. Parameters **kwargs – Keyword arguments passed to default pydantic.BaseModel.dict method. Returns A dictionary representation of the chain. Example ..code-block:: python chain.dict(exclude_unset=True) # -> {“_type”: “foo”, “verbose”: False, …} classmethod from_llm(llm: BaseLanguageModel, verbose: bool = True) → LLMChain[source]¶ Get the response parser. classmethod from_string(llm: BaseLanguageModel, template: str) → LLMChain¶ Create LLMChain from LLM and template. generate(input_list: List[Dict[str, Any]], run_manager: Optional[CallbackManagerForChainRun] = None) → LLMResult¶ Generate LLM result from inputs. predict(callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → str¶ Format prompt with kwargs and pass to LLM. 
Parameters callbacks – Callbacks to pass to LLMChain **kwargs – Keys to pass to prompt template. Returns Completion from LLM. Example completion = llm.predict(adjective="funny") predict_and_parse(callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, **kwargs: Any) → Union[str, List[str], Dict[str, Any]]¶ Call predict and then parse the results. prep_inputs(inputs: Union[Dict[str, Any], Any]) → Dict[str, str]¶ Validate and prepare chain inputs, including adding inputs from memory. Parameters inputs – Dictionary of raw inputs, or single input if chain expects only one param. Should contain all inputs specified in Chain.input_keys except for inputs that will be set by the chain’s memory. Returns A dictionary of all inputs, including those added by the chain’s memory. prep_outputs(inputs: Dict[str, str], outputs: Dict[str, str], return_only_outputs: bool = False) → Dict[str, str]¶ Validate and prepare chain outputs, and save info about this run to memory. Parameters inputs – Dictionary of chain inputs, including any inputs added by chain memory. outputs – Dictionary of initial chain outputs. return_only_outputs – Whether to only return the chain outputs. If False, inputs are also added to the final outputs. Returns A dict of the final chain outputs. prep_prompts(input_list: List[Dict[str, Any]], run_manager: Optional[CallbackManagerForChainRun] = None) → Tuple[List[PromptValue], Optional[List[str]]]¶ Prepare prompts from inputs. validator raise_callback_manager_deprecation  »  all fields¶ Raise deprecation warning if callback_manager is used. run(*args: Any, callbacks: Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]] = None, tags: Optional[List[str]] = None, metadata: Optional[Dict[str, Any]] = None, **kwargs: Any) → str¶ Convenience method for executing chain when there’s a single string output. The main difference between this method and Chain.__call__ is that this methodcan only be used for chains that return a single string output. If a Chain has more outputs, a non-string output, or you want to return the inputs/run info along with the outputs, use Chain.__call__. The other difference is that this method expects inputs to be passed directly in as positional arguments or keyword arguments, whereas Chain.__call__ expects a single input dictionary with all the inputs. Parameters *args – If the chain expects a single input, it can be passed in as the sole positional argument. callbacks – Callbacks to use for this chain run. These will be called in addition to callbacks passed to the chain during construction, but only these runtime callbacks will propagate to calls to other objects. tags – List of string tags to pass to all callbacks. These will be passed in addition to tags passed to the chain during construction, but only these runtime tags will propagate to calls to other objects. **kwargs – If the chain expects multiple inputs, they can be passed in directly as keyword arguments. Returns The chain output as a string. Example # Suppose we have a single-input chain that takes a 'question' string: chain.run("What's the temperature in Boise, Idaho?") # -> "The temperature in Boise is..." # Suppose we have a multi-input chain that takes a 'question' string # and 'context' string: question = "What's the temperature in Boise, Idaho?" context = "Weather report for Boise, Idaho on 07/03/23..." chain.run(question=question, context=context) # -> "The temperature in Boise is..." save(file_path: Union[Path, str]) → None¶ Save the chain. 
Expects Chain._chain_type property to be implemented and for memory to benull. Parameters file_path – Path to file to save the chain to. Example chain.save(file_path="path/chain.yaml") validator set_verbose  »  verbose¶ Set the chain verbosity. Defaults to the global setting if not specified by the user. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object Configuration for this pydantic object. arbitrary_types_allowed = True¶ extra = 'forbid'¶
Chain to generate tasks.
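A hedged usage sketch for the task-creation chain; all four keyword arguments correspond to the prompt's input_variables, and the sample values are made up for illustration.

from langchain.llms import OpenAI  # illustrative LLM choice

task_creation_chain = TaskCreationChain.from_llm(OpenAI(temperature=0), verbose=False)
new_tasks = task_creation_chain.run(
    result="Outline drafted with five sections",
    task_description="Draft an outline",
    incomplete_tasks="3. Collect sources",
    objective="Write a blog post about solar energy",
)
# -> raw LLM text listing new tasks that do not overlap with the incomplete ones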
e634434d-ec87-44fa-b132-fe428f1dfb0f
[ "json", "re", "abc.abstractmethod", "typing.Dict", "typing.NamedTuple", "langchain.schema.BaseOutputParser" ]
langchain.experimental.autonomous_agents.autogpt.output_parser.AutoGPTAction
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.autonomous_agents.autogpt.output_parser.AutoGPTAction.html#langchain.experimental.autonomous_agents.autogpt.output_parser.AutoGPTAction
class AutoGPTAction(NamedTuple):
    """Action parsed from AutoGPT output: a command name and its keyword arguments."""

    name: str
    args: Dict
langchain.experimental.autonomous_agents.autogpt.output_parser.AutoGPTAction¶ class langchain.experimental.autonomous_agents.autogpt.output_parser.AutoGPTAction(name, args)[source]¶ Bases: NamedTuple Create new instance of AutoGPTAction(name, args) Methods __init__() count(value, /) Return number of occurrences of value. index(value[, start, stop]) Return first index of value. Attributes args Alias for field number 1 name Alias for field number 0 count(value, /)¶ Return number of occurrences of value. index(value, start=0, stop=9223372036854775807, /)¶ Return first index of value. Raises ValueError if the value is not present. args: Dict¶ Alias for field number 1 name: str¶ Alias for field number 0
Create new instance of AutoGPTAction(name, args)
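Since AutoGPTAction is a plain NamedTuple, constructing and unpacking one is straightforward; the command name and arguments below are made-up example values.

action = AutoGPTAction(name="write_file", args={"file_path": "notes.txt", "text": "hello"})
action.name       # 'write_file'
action.args       # {'file_path': 'notes.txt', 'text': 'hello'}
name, args = action  # tuple unpacking also works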
3c51581a-f96b-44fb-b7b1-2b0c8e1a758f
[ "json", "re", "abc.abstractmethod", "typing.Dict", "typing.NamedTuple", "langchain.schema.BaseOutputParser" ]
langchain.experimental.autonomous_agents.autogpt.output_parser.BaseAutoGPTOutputParser
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.autonomous_agents.autogpt.output_parser.BaseAutoGPTOutputParser.html#langchain.experimental.autonomous_agents.autogpt.output_parser.BaseAutoGPTOutputParser
class BaseAutoGPTOutputParser(BaseOutputParser): @abstractmethod def parse(self, text: str) -> AutoGPTAction: """Return AutoGPTAction"""
langchain.experimental.autonomous_agents.autogpt.output_parser.BaseAutoGPTOutputParser¶ class langchain.experimental.autonomous_agents.autogpt.output_parser.BaseAutoGPTOutputParser[source]¶ Bases: BaseOutputParser Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. dict(**kwargs: Any) → Dict¶ Return dictionary representation of output parser. get_format_instructions() → str¶ Instructions on how the LLM output should be formatted. abstract parse(text: str) → AutoGPTAction[source]¶ Return AutoGPTAction parse_result(result: List[Generation]) → T¶ Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, which is assumed to be the highest-likelihood Generation. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. parse_with_prompt(completion: str, prompt: PromptValue) → Any¶ Parse the output of an LLM call with the input prompt for context. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. Parameters completion – String output of language model. prompt – Input PromptValue. Returns Structured output. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Create a new model by parsing and validating input data from keyword arguments.
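A minimal sketch of a custom subclass, using only the classes documented above; the line-based format it parses is an invented example, not a format AutoGPT itself uses:

from langchain.experimental.autonomous_agents.autogpt.output_parser import (
    AutoGPTAction,
    BaseAutoGPTOutputParser,
)


class FirstLineOutputParser(BaseAutoGPTOutputParser):
    """Toy parser: first line is the command name, the rest is its input."""

    def parse(self, text: str) -> AutoGPTAction:
        lines = text.strip().splitlines()
        if not lines:
            # Mirror AutoGPTOutputParser's convention of returning an ERROR action.
            return AutoGPTAction(name="ERROR", args={"error": "empty output"})
        return AutoGPTAction(
            name=lines[0].strip(), args={"input": "\n".join(lines[1:])}
        )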
ecd602b2-b214-449c-b3b1-c5a99fa8b1c9
[ "json", "re", "abc.abstractmethod", "typing.Dict", "typing.NamedTuple", "langchain.schema.BaseOutputParser" ]
langchain.experimental.autonomous_agents.autogpt.output_parser.preprocess_json_input
Function
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.autonomous_agents.autogpt.output_parser.preprocess_json_input.html#langchain.experimental.autonomous_agents.autogpt.output_parser.preprocess_json_input
def preprocess_json_input(input_str: str) -> str:
    """Preprocesses a string to be parsed as json.

    Replace single backslashes with double backslashes,
    while leaving already escaped ones intact.

    Args:
        input_str: String to be preprocessed

    Returns:
        Preprocessed string
    """
    corrected_str = re.sub(
        r'(?<!\\)\\(?!["\\/bfnrt]|u[0-9a-fA-F]{4})', r"\\\\", input_str
    )
    return corrected_str
langchain.experimental.autonomous_agents.autogpt.output_parser.preprocess_json_input¶ langchain.experimental.autonomous_agents.autogpt.output_parser.preprocess_json_input(input_str: str) → str[source]¶ Preprocesses a string to be parsed as json. Replace single backslashes with double backslashes, while leaving already escaped ones intact. Parameters input_str – String to be preprocessed Returns Preprocessed string
Preprocesses a string to be parsed as json.
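A small sketch of the round trip; the file path is just an illustrative value. Its \p and \o sequences are invalid JSON escapes, so the raw string fails to parse until the backslashes are doubled:

import json

from langchain.experimental.autonomous_agents.autogpt.output_parser import (
    preprocess_json_input,
)

# The Python literal below contains single backslashes, i.e. the JSON text
# is {"path": "C:\projects\out.txt"} -- \p and \o are not valid JSON escapes.
raw = '{"path": "C:\\projects\\out.txt"}'
try:
    json.loads(raw)
except json.JSONDecodeError:
    fixed = preprocess_json_input(raw)  # doubles the unescaped backslashes
    print(json.loads(fixed)["path"])    # C:\projects\out.txt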
30ca5750-0bff-4500-a5f8-3ad5fe41dcff
[ "json", "re", "abc.abstractmethod", "typing.Dict", "typing.NamedTuple", "langchain.schema.BaseOutputParser" ]
langchain.experimental.autonomous_agents.autogpt.output_parser.AutoGPTOutputParser
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.autonomous_agents.autogpt.output_parser.AutoGPTOutputParser.html#langchain.experimental.autonomous_agents.autogpt.output_parser.AutoGPTOutputParser
class AutoGPTOutputParser(BaseAutoGPTOutputParser):
    def parse(self, text: str) -> AutoGPTAction:
        try:
            parsed = json.loads(text, strict=False)
        except json.JSONDecodeError:
            preprocessed_text = preprocess_json_input(text)
            try:
                parsed = json.loads(preprocessed_text, strict=False)
            except Exception:
                return AutoGPTAction(
                    name="ERROR",
                    args={"error": f"Could not parse invalid json: {text}"},
                )
        try:
            return AutoGPTAction(
                name=parsed["command"]["name"],
                args=parsed["command"]["args"],
            )
        except (KeyError, TypeError):
            # If the command is null or incomplete, return an erroneous tool
            return AutoGPTAction(
                name="ERROR", args={"error": f"Incomplete command args: {parsed}"}
            )
langchain.experimental.autonomous_agents.autogpt.output_parser.AutoGPTOutputParser¶ class langchain.experimental.autonomous_agents.autogpt.output_parser.AutoGPTOutputParser[source]¶ Bases: BaseAutoGPTOutputParser Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. dict(**kwargs: Any) → Dict¶ Return dictionary representation of output parser. get_format_instructions() → str¶ Instructions on how the LLM output should be formatted. parse(text: str) → AutoGPTAction[source]¶ Return AutoGPTAction parse_result(result: List[Generation]) → T¶ Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, which is assumed to be the highest-likelihood Generation. Parameters result – A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns Structured output. parse_with_prompt(completion: str, prompt: PromptValue) → Any¶ Parse the output of an LLM call with the input prompt for context. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so. Parameters completion – String output of language model. prompt – Input PromptValue. Returns Structured output. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object extra = 'ignore'¶
Create a new model by parsing and validating input data from keyword arguments.
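Illustrative usage, with a made-up command payload of the {"command": {"name": ..., "args": ...}} shape the parser expects:

from langchain.experimental.autonomous_agents.autogpt.output_parser import (
    AutoGPTOutputParser,
)

parser = AutoGPTOutputParser()

# Well-formed model output parses into a normal action.
good = '{"command": {"name": "search", "args": {"query": "weather in Boise"}}}'
print(parser.parse(good))
# AutoGPTAction(name='search', args={'query': 'weather in Boise'})

# Malformed output degrades to an ERROR action instead of raising.
print(parser.parse("not json at all").name)  # ERROR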
dab52a45-1bdd-4cfb-9e3d-e75452cc15e4
[ "time", "typing.Any", "typing.Callable", "typing.List", "pydantic.BaseModel", "langchain.experimental.autonomous_agents.autogpt.prompt_generator.get_prompt", "langchain.prompts.chat.BaseChatPromptTemplate", "langchain.schema.messages.BaseMessage", "langchain.schema.messages.HumanMessage", "langchain.schema.messages.SystemMessage", "langchain.tools.base.BaseTool", "langchain.vectorstores.base.VectorStoreRetriever" ]
langchain.experimental.autonomous_agents.autogpt.prompt.AutoGPTPrompt
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.autonomous_agents.autogpt.prompt.AutoGPTPrompt.html#langchain.experimental.autonomous_agents.autogpt.prompt.AutoGPTPrompt
class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel):
    ai_name: str
    ai_role: str
    tools: List[BaseTool]
    token_counter: Callable[[str], int]
    send_token_limit: int = 4196

    def construct_full_prompt(self, goals: List[str]) -> str:
        prompt_start = (
            "Your decisions must always be made independently "
            "without seeking user assistance.\n"
            "Play to your strengths as an LLM and pursue simple "
            "strategies with no legal complications.\n"
            "If you have completed all your tasks, make sure to "
            'use the "finish" command.'
        )
        # Construct full prompt
        full_prompt = (
            f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
        )
        for i, goal in enumerate(goals):
            full_prompt += f"{i+1}. {goal}\n"
        full_prompt += f"\n\n{get_prompt(self.tools)}"
        return full_prompt

    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        base_prompt = SystemMessage(content=self.construct_full_prompt(kwargs["goals"]))
        time_prompt = SystemMessage(
            content=f"The current time and date is {time.strftime('%c')}"
        )
        used_tokens = self.token_counter(base_prompt.content) + self.token_counter(
            time_prompt.content
        )
        memory: VectorStoreRetriever = kwargs["memory"]
        previous_messages = kwargs["messages"]
        relevant_docs = memory.get_relevant_documents(str(previous_messages[-10:]))
        relevant_memory = [d.page_content for d in relevant_docs]
        relevant_memory_tokens = sum(
            [self.token_counter(doc) for doc in relevant_memory]
        )
        while used_tokens + relevant_memory_tokens > 2500:
            relevant_memory = relevant_memory[:-1]
            relevant_memory_tokens = sum(
                [self.token_counter(doc) for doc in relevant_memory]
            )
        content_format = (
            f"This reminds you of these events "
            f"from your past:\n{relevant_memory}\n\n"
        )
        memory_message = SystemMessage(content=content_format)
        used_tokens += self.token_counter(memory_message.content)
        historical_messages: List[BaseMessage] = []
        for message in previous_messages[-10:][::-1]:
            message_tokens = self.token_counter(message.content)
            if used_tokens + message_tokens > self.send_token_limit - 1000:
                break
            historical_messages = [message] + historical_messages
            used_tokens += message_tokens
        input_message = HumanMessage(content=kwargs["user_input"])
        messages: List[BaseMessage] = [base_prompt, time_prompt, memory_message]
        messages += historical_messages
        messages.append(input_message)
        return messages
langchain.experimental.autonomous_agents.autogpt.prompt.AutoGPTPrompt¶ class langchain.experimental.autonomous_agents.autogpt.prompt.AutoGPTPrompt(*, input_variables: List[str], output_parser: Optional[BaseOutputParser] = None, partial_variables: Mapping[str, Union[str, Callable[[], str]]] = None, ai_name: str, ai_role: str, tools: List[BaseTool], token_counter: Callable[[str], int], send_token_limit: int = 4196)[source]¶ Bases: BaseChatPromptTemplate, BaseModel Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param ai_name: str [Required]¶ param ai_role: str [Required]¶ param input_variables: List[str] [Required]¶ A list of the names of the variables the prompt template expects. param output_parser: Optional[BaseOutputParser] = None¶ How to parse the output of calling an LLM on this formatted prompt. param partial_variables: Mapping[str, Union[str, Callable[[], str]]] [Optional]¶ param send_token_limit: int = 4196¶ param token_counter: Callable[[str], int] [Required]¶ param tools: List[langchain.tools.base.BaseTool] [Required]¶ construct_full_prompt(goals: List[str]) → str[source]¶ dict(**kwargs: Any) → Dict¶ Return dictionary representation of prompt. format(**kwargs: Any) → str¶ Format the prompt with the inputs. Parameters kwargs – Any arguments to be passed to the prompt template. Returns A formatted string. Example: prompt.format(variable1="foo") format_messages(**kwargs: Any) → List[BaseMessage][source]¶ Format kwargs into a list of messages. format_prompt(**kwargs: Any) → PromptValue¶ Create Chat Messages. partial(**kwargs: Union[str, Callable[[], str]]) → BasePromptTemplate¶ Return a partial of the prompt template. save(file_path: Union[Path, str]) → None¶ Save the prompt. Parameters file_path – Path to directory to save prompt to. Example: prompt.save(file_path=”path/prompt.yaml”) to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ validator validate_variable_names  »  all fields¶ Validate variable names do not include restricted names. property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. model Config¶ Bases: object Configuration for this pydantic object. arbitrary_types_allowed = True¶
Create a new model by parsing and validating input data from keyword arguments.
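A minimal construction sketch; the name, role, goal, and the crude character-count token counter are placeholder assumptions (a real agent would pass its tools and a tokenizer-backed counter), and the input_variables list simply names the kwargs that format_messages consumes:

from langchain.experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt

prompt = AutoGPTPrompt(
    ai_name="Tom",
    ai_role="an assistant that researches topics and writes summaries",
    tools=[],  # normally a list of BaseTool instances
    token_counter=lambda text: len(text) // 4,  # rough stand-in for a tokenizer
    input_variables=["memory", "messages", "goals", "user_input"],
)
# Only the system-prompt portion is built here; no retriever or LLM needed.
print(prompt.construct_full_prompt(["Summarize today's AI news"])[:200])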
3abcc080-062e-4c08-b21c-764b484dc300
[ "json", "typing.List", "langchain.tools.base.BaseTool" ]
langchain.experimental.autonomous_agents.autogpt.prompt_generator.get_prompt
Function
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.autonomous_agents.autogpt.prompt_generator.get_prompt.html#langchain.experimental.autonomous_agents.autogpt.prompt_generator.get_prompt
def get_prompt(tools: List[BaseTool]) -> str:
    """This function generates a prompt string.

    It includes various constraints, commands, resources, and performance evaluations.

    Returns:
        str: The generated prompt string.
    """

    # Initialize the PromptGenerator object
    prompt_generator = PromptGenerator()

    # Add constraints to the PromptGenerator object
    prompt_generator.add_constraint(
        "~4000 word limit for short term memory. "
        "Your short term memory is short, "
        "so immediately save important information to files."
    )
    prompt_generator.add_constraint(
        "If you are unsure how you previously did something "
        "or want to recall past events, "
        "thinking about similar events will help you remember."
    )
    prompt_generator.add_constraint("No user assistance")
    prompt_generator.add_constraint(
        'Exclusively use the commands listed in double quotes e.g. "command name"'
    )

    # Add commands to the PromptGenerator object
    for tool in tools:
        prompt_generator.add_tool(tool)

    # Add resources to the PromptGenerator object
    prompt_generator.add_resource(
        "Internet access for searches and information gathering."
    )
    prompt_generator.add_resource("Long Term memory management.")
    prompt_generator.add_resource(
        "GPT-3.5 powered Agents for delegation of simple tasks."
    )
    prompt_generator.add_resource("File output.")

    # Add performance evaluations to the PromptGenerator object
    prompt_generator.add_performance_evaluation(
        "Continuously review and analyze your actions "
        "to ensure you are performing to the best of your abilities."
    )
    prompt_generator.add_performance_evaluation(
        "Constructively self-criticize your big-picture behavior constantly."
    )
    prompt_generator.add_performance_evaluation(
        "Reflect on past decisions and strategies to refine your approach."
    )
    prompt_generator.add_performance_evaluation(
        "Every command has a cost, so be smart and efficient. "
        "Aim to complete tasks in the least number of steps."
    )

    # Generate the prompt string
    prompt_string = prompt_generator.generate_prompt_string()

    return prompt_string
langchain.experimental.autonomous_agents.autogpt.prompt_generator.get_prompt¶ langchain.experimental.autonomous_agents.autogpt.prompt_generator.get_prompt(tools: List[BaseTool]) → str[source]¶ This function generates a prompt string. It includes various constraints, commands, resources, and performance evaluations. Returns The generated prompt string. Return type str
This function generates a prompt string.
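Since the tool list only affects the commands section, the function can be previewed with an empty list; real agents pass their BaseTool instances:

from langchain.experimental.autonomous_agents.autogpt.prompt_generator import get_prompt

# Prints the constraints, resources, and performance-evaluation sections;
# the commands section is empty because no tools were supplied.
print(get_prompt([]))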
2a432f75-0fb5-4594-aa1d-d51b8ad64c9f
[ "typing.Any", "typing.Dict", "typing.List", "pydantic.Field", "langchain.memory.chat_memory.BaseChatMemory", "langchain.memory.chat_memory.get_prompt_input_key", "langchain.vectorstores.base.VectorStoreRetriever" ]
langchain.experimental.autonomous_agents.autogpt.memory.AutoGPTMemory
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.autonomous_agents.autogpt.memory.AutoGPTMemory.html#langchain.experimental.autonomous_agents.autogpt.memory.AutoGPTMemory
class AutoGPTMemory(BaseChatMemory):
    retriever: VectorStoreRetriever = Field(exclude=True)
    """VectorStoreRetriever object to connect to."""

    @property
    def memory_variables(self) -> List[str]:
        return ["chat_history", "relevant_context"]

    def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
        """Get the input key for the prompt."""
        if self.input_key is None:
            return get_prompt_input_key(inputs, self.memory_variables)
        return self.input_key

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        input_key = self._get_prompt_input_key(inputs)
        query = inputs[input_key]
        docs = self.retriever.get_relevant_documents(query)
        return {
            "chat_history": self.chat_memory.messages[-10:],
            "relevant_context": docs,
        }
langchain.experimental.autonomous_agents.autogpt.memory.AutoGPTMemory¶ class langchain.experimental.autonomous_agents.autogpt.memory.AutoGPTMemory(*, chat_memory: BaseChatMessageHistory = None, output_key: Optional[str] = None, input_key: Optional[str] = None, return_messages: bool = False, retriever: VectorStoreRetriever)[source]¶ Bases: BaseChatMemory Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param chat_memory: BaseChatMessageHistory [Optional]¶ param input_key: Optional[str] = None¶ param output_key: Optional[str] = None¶ param retriever: langchain.vectorstores.base.VectorStoreRetriever [Required]¶ VectorStoreRetriever object to connect to. param return_messages: bool = False¶ clear() → None¶ Clear memory contents. load_memory_variables(inputs: Dict[str, Any]) → Dict[str, Any][source]¶ Return key-value pairs given the text input to the chain. save_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None¶ Save context from this conversation to buffer. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg. [“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. property memory_variables: List[str]¶ The string keys this memory class will add to chain inputs. model Config¶ Bases: object Configuration for this pydantic object. arbitrary_types_allowed = True¶
Create a new model by parsing and validating input data from keyword arguments.
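A sketch wiring the memory to a retriever; it assumes FAISS and an OpenAI API key are available locally, but any VectorStoreRetriever works. The indexed fact and query are invented:

from langchain.embeddings import OpenAIEmbeddings
from langchain.experimental.autonomous_agents.autogpt.memory import AutoGPTMemory
from langchain.vectorstores import FAISS

# Index a single fact so the retriever has something to return.
store = FAISS.from_texts(
    ["The deploy script lives in scripts/deploy.sh"], OpenAIEmbeddings()
)
memory = AutoGPTMemory(retriever=store.as_retriever())

# With no input_key set, the sole non-memory key ("input") is used as the query.
print(memory.load_memory_variables({"input": "Where is the deploy script?"}))
# -> {'chat_history': [...last 10 messages...], 'relevant_context': [Document(...)]}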
17a467c9-c732-4dff-b860-c48fe3876a83
[ "logging", "re", "datetime.datetime", "typing.Any", "typing.Dict", "typing.List", "typing.Optional", "langchain.LLMChain", "langchain.prompts.PromptTemplate", "langchain.retrievers.TimeWeightedVectorStoreRetriever", "langchain.schema.BaseMemory", "langchain.schema.Document", "langchain.schema.language_model.BaseLanguageModel", "langchain.utils.mock_now" ]
langchain.experimental.generative_agents.memory.GenerativeAgentMemory
Class
https://api.python.langchain.com/en/latest/experimental/langchain.experimental.generative_agents.memory.GenerativeAgentMemory.html#langchain.experimental.generative_agents.memory.GenerativeAgentMemory
class GenerativeAgentMemory(BaseMemory):
    llm: BaseLanguageModel
    """The core language model."""

    memory_retriever: TimeWeightedVectorStoreRetriever
    """The retriever to fetch related memories."""

    verbose: bool = False

    reflection_threshold: Optional[float] = None
    """When aggregate_importance exceeds reflection_threshold, stop to reflect."""

    current_plan: List[str] = []
    """The current plan of the agent."""

    # A weight of 0.15 makes this less important than it
    # would be otherwise, relative to salience and time
    importance_weight: float = 0.15
    """How much weight to assign the memory importance."""

    aggregate_importance: float = 0.0  # : :meta private:
    """Track the sum of the 'importance' of recent memories.

    Triggers reflection when it reaches reflection_threshold."""

    max_tokens_limit: int = 1200  # : :meta private:
    # input keys
    queries_key: str = "queries"
    most_recent_memories_token_key: str = "recent_memories_token"
    add_memory_key: str = "add_memory"
    # output keys
    relevant_memories_key: str = "relevant_memories"
    relevant_memories_simple_key: str = "relevant_memories_simple"
    most_recent_memories_key: str = "most_recent_memories"
    now_key: str = "now"
    reflecting: bool = False

    def chain(self, prompt: PromptTemplate) -> LLMChain:
        return LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)

    @staticmethod
    def _parse_list(text: str) -> List[str]:
        """Parse a newline-separated string into a list of strings."""
        lines = re.split(r"\n", text.strip())
        lines = [line for line in lines if line.strip()]  # remove empty lines
        return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines]

    def _get_topics_of_reflection(self, last_k: int = 50) -> List[str]:
        """Return the 3 most salient high-level questions about recent observations."""
        prompt = PromptTemplate.from_template(
            "{observations}\n\n"
            "Given only the information above, what are the 3 most salient "
            "high-level questions we can answer about the subjects in the statements?\n"
            "Provide each question on a new line."
        )
        observations = self.memory_retriever.memory_stream[-last_k:]
        observation_str = "\n".join(
            [self._format_memory_detail(o) for o in observations]
        )
        result = self.chain(prompt).run(observations=observation_str)
        return self._parse_list(result)

    def _get_insights_on_topic(
        self, topic: str, now: Optional[datetime] = None
    ) -> List[str]:
        """Generate 'insights' on a topic of reflection, based on pertinent memories."""
        prompt = PromptTemplate.from_template(
            "Statements relevant to: '{topic}'\n"
            "---\n"
            "{related_statements}\n"
            "---\n"
            "What 5 high-level novel insights can you infer from the above statements "
            "that are relevant for answering the following question?\n"
            "Do not include any insights that are not relevant to the question.\n"
            "Do not repeat any insights that have already been made.\n\n"
            "Question: {topic}\n\n"
            "(example format: insight (because of 1, 5, 3))\n"
        )
        related_memories = self.fetch_memories(topic, now=now)
        related_statements = "\n".join(
            [
                self._format_memory_detail(memory, prefix=f"{i+1}. ")
                for i, memory in enumerate(related_memories)
            ]
        )
        result = self.chain(prompt).run(
            topic=topic, related_statements=related_statements
        )
        # TODO: Parse the connections between memories and insights
        return self._parse_list(result)

    def pause_to_reflect(self, now: Optional[datetime] = None) -> List[str]:
        """Reflect on recent observations and generate 'insights'."""
        if self.verbose:
            logger.info("Character is reflecting")
        new_insights = []
        topics = self._get_topics_of_reflection()
        for topic in topics:
            insights = self._get_insights_on_topic(topic, now=now)
            for insight in insights:
                self.add_memory(insight, now=now)
            new_insights.extend(insights)
        return new_insights

    def _score_memory_importance(self, memory_content: str) -> float:
        """Score the absolute importance of the given memory."""
        prompt = PromptTemplate.from_template(
            "On the scale of 1 to 10, where 1 is purely mundane"
            + " (e.g., brushing teeth, making bed) and 10 is"
            + " extremely poignant (e.g., a break up, college"
            + " acceptance), rate the likely poignancy of the"
            + " following piece of memory. Respond with a single integer."
            + "\nMemory: {memory_content}"
            + "\nRating: "
        )
        score = self.chain(prompt).run(memory_content=memory_content).strip()
        if self.verbose:
            logger.info(f"Importance score: {score}")
        match = re.search(r"^\D*(\d+)", score)
        if match:
            return (float(match.group(1)) / 10) * self.importance_weight
        else:
            return 0.0

    def _score_memories_importance(self, memory_content: str) -> List[float]:
        """Score the absolute importance of the given memories."""
        prompt = PromptTemplate.from_template(
            "On the scale of 1 to 10, where 1 is purely mundane"
            + " (e.g., brushing teeth, making bed) and 10 is"
            + " extremely poignant (e.g., a break up, college"
            + " acceptance), rate the likely poignancy of the"
            + " following piece of memory. Always answer with only a list of numbers."
            + " If just given one memory still respond in a list."
            + " Memories are separated by semicolons (;)"
            + "\nMemories: {memory_content}"
            + "\nRating: "
        )
        scores = self.chain(prompt).run(memory_content=memory_content).strip()
        if self.verbose:
            logger.info(f"Importance scores: {scores}")
        # Split into list of strings and convert to floats
        scores_list = [float(x) for x in scores.split(";")]
        return scores_list

    def add_memories(
        self, memory_content: str, now: Optional[datetime] = None
    ) -> List[str]:
        """Add observations or memories to the agent's memory."""
        importance_scores = self._score_memories_importance(memory_content)
        self.aggregate_importance += max(importance_scores)
        memory_list = memory_content.split(";")
        documents = []
        for i in range(len(memory_list)):
            documents.append(
                Document(
                    page_content=memory_list[i],
                    metadata={"importance": importance_scores[i]},
                )
            )
        result = self.memory_retriever.add_documents(documents, current_time=now)

        # After an agent has processed a certain amount of memories (as measured by
        # aggregate importance), it is time to reflect on recent events to add
        # more synthesized memories to the agent's memory stream.
        if (
            self.reflection_threshold is not None
            and self.aggregate_importance > self.reflection_threshold
            and not self.reflecting
        ):
            self.reflecting = True
            self.pause_to_reflect(now=now)
            # Hack to clear the importance from reflection
            self.aggregate_importance = 0.0
            self.reflecting = False
        return result

    def add_memory(
        self, memory_content: str, now: Optional[datetime] = None
    ) -> List[str]:
        """Add an observation or memory to the agent's memory."""
        importance_score = self._score_memory_importance(memory_content)
        self.aggregate_importance += importance_score
        document = Document(
            page_content=memory_content, metadata={"importance": importance_score}
        )
        result = self.memory_retriever.add_documents([document], current_time=now)

        # After an agent has processed a certain amount of memories (as measured by
        # aggregate importance), it is time to reflect on recent events to add
        # more synthesized memories to the agent's memory stream.
        if (
            self.reflection_threshold is not None
            and self.aggregate_importance > self.reflection_threshold
            and not self.reflecting
        ):
            self.reflecting = True
            self.pause_to_reflect(now=now)
            # Hack to clear the importance from reflection
            self.aggregate_importance = 0.0
            self.reflecting = False
        return result

    def fetch_memories(
        self, observation: str, now: Optional[datetime] = None
    ) -> List[Document]:
        """Fetch related memories."""
        if now is not None:
            with mock_now(now):
                return self.memory_retriever.get_relevant_documents(observation)
        else:
            return self.memory_retriever.get_relevant_documents(observation)

    def format_memories_detail(self, relevant_memories: List[Document]) -> str:
        content = []
        for mem in relevant_memories:
            content.append(self._format_memory_detail(mem, prefix="- "))
        return "\n".join([f"{mem}" for mem in content])

    def _format_memory_detail(self, memory: Document, prefix: str = "") -> str:
        created_time = memory.metadata["created_at"].strftime("%B %d, %Y, %I:%M %p")
        return f"{prefix}[{created_time}] {memory.page_content.strip()}"

    def format_memories_simple(self, relevant_memories: List[Document]) -> str:
        return "; ".join([f"{mem.page_content}" for mem in relevant_memories])

    def _get_memories_until_limit(self, consumed_tokens: int) -> str:
        """Reduce the number of tokens in the documents."""
        result = []
        for doc in self.memory_retriever.memory_stream[::-1]:
            if consumed_tokens >= self.max_tokens_limit:
                break
            consumed_tokens += self.llm.get_num_tokens(doc.page_content)
            if consumed_tokens < self.max_tokens_limit:
                result.append(doc)
        return self.format_memories_simple(result)

    @property
    def memory_variables(self) -> List[str]:
        """Input keys this memory class will load dynamically."""
        return []

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Return key-value pairs given the text input to the chain."""
        queries = inputs.get(self.queries_key)
        now = inputs.get(self.now_key)
        if queries is not None:
            relevant_memories = [
                mem for query in queries for mem in self.fetch_memories(query, now=now)
            ]
            return {
                self.relevant_memories_key: self.format_memories_detail(
                    relevant_memories
                ),
                self.relevant_memories_simple_key: self.format_memories_simple(
                    relevant_memories
                ),
            }
        most_recent_memories_token = inputs.get(self.most_recent_memories_token_key)
        if most_recent_memories_token is not None:
            return {
                self.most_recent_memories_key: self._get_memories_until_limit(
                    most_recent_memories_token
                )
            }
        return {}

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, Any]) -> None:
        """Save the context of this model run to memory."""
        # TODO: fix the save memory key
        mem = outputs.get(self.add_memory_key)
        now = outputs.get(self.now_key)
        if mem:
            self.add_memory(mem, now=now)

    def clear(self) -> None:
        """Clear memory contents."""
langchain.experimental.generative_agents.memory.GenerativeAgentMemory¶ class langchain.experimental.generative_agents.memory.GenerativeAgentMemory(*, llm: BaseLanguageModel, memory_retriever: TimeWeightedVectorStoreRetriever, verbose: bool = False, reflection_threshold: Optional[float] = None, current_plan: List[str] = [], importance_weight: float = 0.15, aggregate_importance: float = 0.0, max_tokens_limit: int = 1200, queries_key: str = 'queries', most_recent_memories_token_key: str = 'recent_memories_token', add_memory_key: str = 'add_memory', relevant_memories_key: str = 'relevant_memories', relevant_memories_simple_key: str = 'relevant_memories_simple', most_recent_memories_key: str = 'most_recent_memories', now_key: str = 'now', reflecting: bool = False)[source]¶ Bases: BaseMemory Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model. param add_memory_key: str = 'add_memory'¶ param aggregate_importance: float = 0.0¶ Track the sum of the ‘importance’ of recent memories. Triggers reflection when it reaches reflection_threshold. param current_plan: List[str] = []¶ The current plan of the agent. param importance_weight: float = 0.15¶ How much weight to assign the memory importance. param llm: langchain.schema.language_model.BaseLanguageModel [Required]¶ The core language model. param max_tokens_limit: int = 1200¶ param memory_retriever: langchain.retrievers.time_weighted_retriever.TimeWeightedVectorStoreRetriever [Required]¶ The retriever to fetch related memories. param most_recent_memories_key: str = 'most_recent_memories'¶ param most_recent_memories_token_key: str = 'recent_memories_token'¶ param now_key: str = 'now'¶ param queries_key: str = 'queries'¶ param reflecting: bool = False¶ param reflection_threshold: Optional[float] = None¶ When aggregate_importance exceeds reflection_threshold, stop to reflect. param relevant_memories_key: str = 'relevant_memories'¶ param relevant_memories_simple_key: str = 'relevant_memories_simple'¶ param verbose: bool = False¶ add_memories(memory_content: str, now: Optional[datetime] = None) → List[str][source]¶ Add observations or memories to the agent’s memory. add_memory(memory_content: str, now: Optional[datetime] = None) → List[str][source]¶ Add an observation or memory to the agent’s memory. chain(prompt: PromptTemplate) → LLMChain[source]¶ clear() → None[source]¶ Clear memory contents. fetch_memories(observation: str, now: Optional[datetime] = None) → List[Document][source]¶ Fetch related memories. format_memories_detail(relevant_memories: List[Document]) → str[source]¶ format_memories_simple(relevant_memories: List[Document]) → str[source]¶ load_memory_variables(inputs: Dict[str, Any]) → Dict[str, str][source]¶ Return key-value pairs given the text input to the chain. pause_to_reflect(now: Optional[datetime] = None) → List[str][source]¶ Reflect on recent observations and generate ‘insights’. save_context(inputs: Dict[str, Any], outputs: Dict[str, Any]) → None[source]¶ Save the context of this model run to memory. to_json() → Union[SerializedConstructor, SerializedNotImplemented]¶ to_json_not_implemented() → SerializedNotImplemented¶ property lc_attributes: Dict¶ Return a list of attribute names that should be included in the serialized kwargs. These attributes must be accepted by the constructor. property lc_namespace: List[str]¶ Return the namespace of the langchain object. eg.
[“langchain”, “llms”, “openai”] property lc_secrets: Dict[str, str]¶ Return a map of constructor argument names to secret ids. eg. {“openai_api_key”: “OPENAI_API_KEY”} property lc_serializable: bool¶ Return whether or not the class is serializable. property memory_variables: List[str]¶ Input keys this memory class will load dynamically. model Config¶ Bases: object Configuration for this pydantic object. arbitrary_types_allowed = True¶
Create a new model by parsing and validating input data from keyword arguments.
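A construction sketch assuming an OpenAI key is set and faiss is installed; the 1536 dimension matches OpenAI's default text-embedding-ada-002 model, and the observation text is invented:

import faiss

from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.experimental.generative_agents.memory import GenerativeAgentMemory
from langchain.llms import OpenAI
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()
index = faiss.IndexFlatL2(1536)  # dimensionality of the embedding model
vectorstore = FAISS(embeddings.embed_query, index, InMemoryDocstore({}), {})
retriever = TimeWeightedVectorStoreRetriever(vectorstore=vectorstore, k=5)

memory = GenerativeAgentMemory(
    llm=OpenAI(temperature=0),
    memory_retriever=retriever,
    reflection_threshold=8.0,  # reflect once accumulated importance passes this
)

# Scores the observation's importance with the LLM, then stores it
# (the retriever stamps each document with a created_at time).
memory.add_memory("Tommie saw a dog in the park.")
print(memory.load_memory_variables({"queries": ["What did Tommie see?"]}))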