Columns: id (string, 14-16 characters), text (string, 36-2.73k characters), source (string, 49-117 characters)
43a0c102b9ba-1
return values

    def compress_documents(
        self, documents: Sequence[Document], query: str
    ) -> Sequence[Document]:
        """Filter documents based on similarity of their embeddings to the query."""
        stateful_documents = get_stateful_documents(documents)
        embedded_documents = _get_embeddings_from_stateful_docs(
            self.embeddings, stateful_documents
        )
        embedded_query = self.embeddings.embed_query(query)
        similarity = self.similarity_fn([embedded_query], embedded_documents)[0]
        included_idxs = np.arange(len(embedded_documents))
        if self.k is not None:
            included_idxs = np.argsort(similarity)[::-1][: self.k]
        if self.similarity_threshold is not None:
            similar_enough = np.where(
                similarity[included_idxs] > self.similarity_threshold
            )
            included_idxs = included_idxs[similar_enough]
        return [stateful_documents[i] for i in included_idxs]

    async def acompress_documents(
        self, documents: Sequence[Document], query: str
    ) -> Sequence[Document]:
        """Filter down documents."""
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/embeddings_filter.html
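A minimal usage sketch for the EmbeddingsFilter shown above; the OpenAI embeddings backend, the example documents, and the 0.76 threshold are illustrative assumptions, not part of the source.

```python
# Hypothetical usage of EmbeddingsFilter; values are illustrative.
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain.schema import Document

embeddings = OpenAIEmbeddings()  # assumes OPENAI_API_KEY is set
embeddings_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76)

docs = [
    Document(page_content="LangChain supports document compressors."),
    Document(page_content="Completely unrelated text about cooking."),
]
# Keeps only documents whose embedding similarity to the query clears the threshold.
relevant = embeddings_filter.compress_documents(docs, query="What is a document compressor?")
```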
ace4e9e3ab75-0
Source code for langchain.retrievers.document_compressors.base """Interface for retrieved document compressors.""" from abc import ABC, abstractmethod from typing import List, Sequence, Union from pydantic import BaseModel from langchain.schema import BaseDocumentTransformer, Document class BaseDocumentCompressor(BaseModel, ABC): """Base abstraction interface for document compression.""" @abstractmethod def compress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: """Compress retrieved documents given the query context.""" @abstractmethod async def acompress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: """Compress retrieved documents given the query context.""" [docs]class DocumentCompressorPipeline(BaseDocumentCompressor): """Document compressor that uses a pipeline of transformers.""" transformers: List[Union[BaseDocumentTransformer, BaseDocumentCompressor]] """List of document filters that are chained together and run in sequence.""" class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True [docs] def compress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: """Transform a list of documents.""" for _transformer in self.transformers: if isinstance(_transformer, BaseDocumentCompressor): documents = _transformer.compress_documents(documents, query) elif isinstance(_transformer, BaseDocumentTransformer): documents = _transformer.transform_documents(documents) else: raise ValueError(f"Got unexpected transformer type: {_transformer}") return documents [docs] async def acompress_documents( self, documents: Sequence[Document], query: str
https://python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/base.html
ace4e9e3ab75-1
        self, documents: Sequence[Document], query: str
    ) -> Sequence[Document]:
        """Compress retrieved documents given the query context."""
        for _transformer in self.transformers:
            if isinstance(_transformer, BaseDocumentCompressor):
                documents = await _transformer.acompress_documents(documents, query)
            elif isinstance(_transformer, BaseDocumentTransformer):
                documents = await _transformer.atransform_documents(documents)
            else:
                raise ValueError(f"Got unexpected transformer type: {_transformer}")
        return documents
https://python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/base.html
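A sketch of how DocumentCompressorPipeline chains transformers and compressors; the splitter and redundancy-filter components are assumed from the same-era LangChain API, and the parameter values are illustrative.

```python
# Sketch of a compressor pipeline; component names assume the 2023-era LangChain API.
from langchain.document_transformers import EmbeddingsRedundantFilter
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.document_compressors import (
    DocumentCompressorPipeline,
    EmbeddingsFilter,
)
from langchain.text_splitter import CharacterTextSplitter

embeddings = OpenAIEmbeddings()
splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=0, separator=". ")
redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings)
relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76)

# Transformers and compressors run in order: split, de-duplicate, then filter by relevance.
pipeline = DocumentCompressorPipeline(
    transformers=[splitter, redundant_filter, relevant_filter]
)
```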
7f211cdb8959-0
Source code for langchain.retrievers.document_compressors.chain_filter """Filter that uses an LLM to drop documents that aren't relevant to the query.""" from typing import Any, Callable, Dict, Optional, Sequence from langchain import BasePromptTemplate, LLMChain, PromptTemplate from langchain.base_language import BaseLanguageModel from langchain.output_parsers.boolean import BooleanOutputParser from langchain.retrievers.document_compressors.base import BaseDocumentCompressor from langchain.retrievers.document_compressors.chain_filter_prompt import ( prompt_template, ) from langchain.schema import Document def _get_default_chain_prompt() -> PromptTemplate: return PromptTemplate( template=prompt_template, input_variables=["question", "context"], output_parser=BooleanOutputParser(), ) def default_get_input(query: str, doc: Document) -> Dict[str, Any]: """Return the compression chain input.""" return {"question": query, "context": doc.page_content} [docs]class LLMChainFilter(BaseDocumentCompressor): """Filter that drops documents that aren't relevant to the query.""" llm_chain: LLMChain """LLM wrapper to use for filtering documents. The chain prompt is expected to have a BooleanOutputParser.""" get_input: Callable[[str, Document], dict] = default_get_input """Callable for constructing the chain input from the query and a Document.""" [docs] def compress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: """Filter down documents based on their relevance to the query.""" filtered_docs = [] for doc in documents: _input = self.get_input(query, doc) include_doc = self.llm_chain.predict_and_parse(**_input)
https://python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/chain_filter.html
7f211cdb8959-1
            include_doc = self.llm_chain.predict_and_parse(**_input)
            if include_doc:
                filtered_docs.append(doc)
        return filtered_docs

    async def acompress_documents(
        self, documents: Sequence[Document], query: str
    ) -> Sequence[Document]:
        """Filter down documents."""
        raise NotImplementedError

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        prompt: Optional[BasePromptTemplate] = None,
        **kwargs: Any
    ) -> "LLMChainFilter":
        _prompt = prompt if prompt is not None else _get_default_chain_prompt()
        llm_chain = LLMChain(llm=llm, prompt=_prompt)
        return cls(llm_chain=llm_chain, **kwargs)
https://python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/chain_filter.html
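An illustrative use of LLMChainFilter.from_llm; the OpenAI LLM and the example document and query are assumptions.

```python
# Illustrative use of LLMChainFilter; assumes an OpenAI key in the environment.
from langchain.llms import OpenAI
from langchain.retrievers.document_compressors import LLMChainFilter
from langchain.schema import Document

llm = OpenAI(temperature=0)
llm_filter = LLMChainFilter.from_llm(llm)

docs = [Document(page_content="Paris is the capital of France.")]
# The chain answers yes/no per document; only the documents judged relevant are returned.
kept = llm_filter.compress_documents(docs, query="What is the capital of France?")
```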
d321729d141d-0
Source code for langchain.retrievers.document_compressors.cohere_rerank from __future__ import annotations from typing import TYPE_CHECKING, Dict, Sequence from pydantic import Extra, root_validator from langchain.retrievers.document_compressors.base import BaseDocumentCompressor from langchain.schema import Document from langchain.utils import get_from_dict_or_env if TYPE_CHECKING: from cohere import Client else: # We do to avoid pydantic annotation issues when actually instantiating # while keeping this import optional try: from cohere import Client except ImportError: pass [docs]class CohereRerank(BaseDocumentCompressor): client: Client top_n: int = 3 model: str = "rerank-english-v2.0" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @root_validator(pre=True) def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" cohere_api_key = get_from_dict_or_env( values, "cohere_api_key", "COHERE_API_KEY" ) try: import cohere values["client"] = cohere.Client(cohere_api_key) except ImportError: raise ImportError( "Could not import cohere python package. " "Please install it with `pip install cohere`." ) return values [docs] def compress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: if len(documents) == 0: # to avoid empty api call return []
https://python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/cohere_rerank.html
d321729d141d-1
            return []
        doc_list = list(documents)
        _docs = [d.page_content for d in doc_list]
        results = self.client.rerank(
            model=self.model, query=query, documents=_docs, top_n=self.top_n
        )
        final_results = []
        for r in results:
            doc = doc_list[r.index]
            doc.metadata["relevance_score"] = r.relevance_score
            final_results.append(doc)
        return final_results

    async def acompress_documents(
        self, documents: Sequence[Document], query: str
    ) -> Sequence[Document]:
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/cohere_rerank.html
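A hedged CohereRerank sketch; it assumes `pip install cohere`, a COHERE_API_KEY environment variable, and that the class is re-exported from langchain.retrievers.document_compressors.

```python
# Sketch of CohereRerank usage; the documents and query are illustrative.
from langchain.retrievers.document_compressors import CohereRerank
from langchain.schema import Document

reranker = CohereRerank(top_n=2)  # model defaults to "rerank-english-v2.0"
docs = [
    Document(page_content="Cohere provides a rerank endpoint."),
    Document(page_content="An unrelated note about databases."),
    Document(page_content="Reranking reorders documents by query relevance."),
]
# Returns the top_n documents, each annotated with metadata["relevance_score"].
reranked = reranker.compress_documents(docs, query="What does the rerank endpoint do?")
```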
8f6153d82a93-0
Source code for langchain.retrievers.document_compressors.chain_extract """DocumentFilter that uses an LLM chain to extract the relevant parts of documents.""" from __future__ import annotations import asyncio from typing import Any, Callable, Dict, Optional, Sequence from langchain import LLMChain, PromptTemplate from langchain.base_language import BaseLanguageModel from langchain.retrievers.document_compressors.base import BaseDocumentCompressor from langchain.retrievers.document_compressors.chain_extract_prompt import ( prompt_template, ) from langchain.schema import BaseOutputParser, Document def default_get_input(query: str, doc: Document) -> Dict[str, Any]: """Return the compression chain input.""" return {"question": query, "context": doc.page_content} class NoOutputParser(BaseOutputParser[str]): """Parse outputs that could return a null string of some sort.""" no_output_str: str = "NO_OUTPUT" def parse(self, text: str) -> str: cleaned_text = text.strip() if cleaned_text == self.no_output_str: return "" return cleaned_text def _get_default_chain_prompt() -> PromptTemplate: output_parser = NoOutputParser() template = prompt_template.format(no_output_str=output_parser.no_output_str) return PromptTemplate( template=template, input_variables=["question", "context"], output_parser=output_parser, ) [docs]class LLMChainExtractor(BaseDocumentCompressor): llm_chain: LLMChain """LLM wrapper to use for compressing documents.""" get_input: Callable[[str, Document], dict] = default_get_input """Callable for constructing the chain input from the query and a Document.""" [docs] def compress_documents(
https://python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/chain_extract.html
8f6153d82a93-1
[docs] def compress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: """Compress page content of raw documents.""" compressed_docs = [] for doc in documents: _input = self.get_input(query, doc) output = self.llm_chain.predict_and_parse(**_input) if len(output) == 0: continue compressed_docs.append(Document(page_content=output, metadata=doc.metadata)) return compressed_docs [docs] async def acompress_documents( self, documents: Sequence[Document], query: str ) -> Sequence[Document]: """Compress page content of raw documents asynchronously.""" outputs = await asyncio.gather( *[ self.llm_chain.apredict_and_parse(**self.get_input(query, doc)) for doc in documents ] ) compressed_docs = [] for i, doc in enumerate(documents): if len(outputs[i]) == 0: continue compressed_docs.append( Document(page_content=outputs[i], metadata=doc.metadata) ) return compressed_docs [docs] @classmethod def from_llm( cls, llm: BaseLanguageModel, prompt: Optional[PromptTemplate] = None, get_input: Optional[Callable[[str, Document], str]] = None, llm_chain_kwargs: Optional[dict] = None, ) -> LLMChainExtractor: """Initialize from LLM.""" _prompt = prompt if prompt is not None else _get_default_chain_prompt() _get_input = get_input if get_input is not None else default_get_input
https://python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/chain_extract.html
8f6153d82a93-2
        _get_input = get_input if get_input is not None else default_get_input
        llm_chain = LLMChain(llm=llm, prompt=_prompt, **(llm_chain_kwargs or {}))
        return cls(llm_chain=llm_chain, get_input=_get_input)
https://python.langchain.com/en/latest/_modules/langchain/retrievers/document_compressors/chain_extract.html
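An illustrative LLMChainExtractor run; the LLM choice and document text are assumptions.

```python
# Illustrative LLMChainExtractor usage; the extractor keeps only query-relevant spans.
from langchain.llms import OpenAI
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain.schema import Document

llm = OpenAI(temperature=0)
extractor = LLMChainExtractor.from_llm(llm)

doc = Document(page_content="LangChain was released in 2022. It also has a JS port.")
# Each returned Document contains only the extracted, relevant portion of the original.
compressed = extractor.compress_documents([doc], query="When was LangChain released?")
```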
ee1793b4691c-0
Source code for langchain.retrievers.self_query.base """Retriever that generates and executes structured queries over its own data source.""" from typing import Any, Dict, List, Optional, Type, cast from pydantic import BaseModel, Field, root_validator from langchain import LLMChain from langchain.base_language import BaseLanguageModel from langchain.chains.query_constructor.base import load_query_constructor_chain from langchain.chains.query_constructor.ir import StructuredQuery, Visitor from langchain.chains.query_constructor.schema import AttributeInfo from langchain.retrievers.self_query.chroma import ChromaTranslator from langchain.retrievers.self_query.pinecone import PineconeTranslator from langchain.retrievers.self_query.weaviate import WeaviateTranslator from langchain.schema import BaseRetriever, Document from langchain.vectorstores import Chroma, Pinecone, VectorStore, Weaviate def _get_builtin_translator(vectorstore_cls: Type[VectorStore]) -> Visitor: """Get the translator class corresponding to the vector store class.""" BUILTIN_TRANSLATORS: Dict[Type[VectorStore], Type[Visitor]] = { Pinecone: PineconeTranslator, Chroma: ChromaTranslator, Weaviate: WeaviateTranslator, } if vectorstore_cls not in BUILTIN_TRANSLATORS: raise ValueError( f"Self query retriever with Vector Store type {vectorstore_cls}" f" not supported." ) return BUILTIN_TRANSLATORS[vectorstore_cls]() [docs]class SelfQueryRetriever(BaseRetriever, BaseModel): """Retriever that wraps around a vector store and uses an LLM to generate the vector store queries.""" vectorstore: VectorStore """The underlying vector store from which documents will be retrieved."""
https://python.langchain.com/en/latest/_modules/langchain/retrievers/self_query/base.html
ee1793b4691c-1
vectorstore: VectorStore """The underlying vector store from which documents will be retrieved.""" llm_chain: LLMChain """The LLMChain for generating the vector store queries.""" search_type: str = "similarity" """The search type to perform on the vector store.""" search_kwargs: dict = Field(default_factory=dict) """Keyword arguments to pass in to the vector store search.""" structured_query_translator: Visitor """Translator for turning internal query language into vectorstore search params.""" verbose: bool = False class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True @root_validator(pre=True) def validate_translator(cls, values: Dict) -> Dict: """Validate translator.""" if "structured_query_translator" not in values: vectorstore_cls = values["vectorstore"].__class__ values["structured_query_translator"] = _get_builtin_translator( vectorstore_cls ) return values [docs] def get_relevant_documents(self, query: str) -> List[Document]: """Get documents relevant for a query. Args: query: string to find relevant documents for Returns: List of relevant documents """ inputs = self.llm_chain.prep_inputs({"query": query}) structured_query = cast( StructuredQuery, self.llm_chain.predict_and_parse(callbacks=None, **inputs) ) if self.verbose: print(structured_query) new_query, new_kwargs = self.structured_query_translator.visit_structured_query( structured_query ) if structured_query.limit is not None: new_kwargs["k"] = structured_query.limit
https://python.langchain.com/en/latest/_modules/langchain/retrievers/self_query/base.html
ee1793b4691c-2
        if structured_query.limit is not None:
            new_kwargs["k"] = structured_query.limit
        search_kwargs = {**self.search_kwargs, **new_kwargs}
        docs = self.vectorstore.search(new_query, self.search_type, **search_kwargs)
        return docs

    async def aget_relevant_documents(self, query: str) -> List[Document]:
        raise NotImplementedError

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        vectorstore: VectorStore,
        document_contents: str,
        metadata_field_info: List[AttributeInfo],
        structured_query_translator: Optional[Visitor] = None,
        chain_kwargs: Optional[Dict] = None,
        enable_limit: bool = False,
        **kwargs: Any,
    ) -> "SelfQueryRetriever":
        if structured_query_translator is None:
            structured_query_translator = _get_builtin_translator(vectorstore.__class__)
        chain_kwargs = chain_kwargs or {}
        if "allowed_comparators" not in chain_kwargs:
            chain_kwargs[
                "allowed_comparators"
            ] = structured_query_translator.allowed_comparators
        if "allowed_operators" not in chain_kwargs:
            chain_kwargs[
                "allowed_operators"
            ] = structured_query_translator.allowed_operators
        llm_chain = load_query_constructor_chain(
            llm,
            document_contents,
            metadata_field_info,
            enable_limit=enable_limit,
            **chain_kwargs,
        )
        return cls(
            llm_chain=llm_chain,
            vectorstore=vectorstore,
            structured_query_translator=structured_query_translator,
            **kwargs,
        )
https://python.langchain.com/en/latest/_modules/langchain/retrievers/self_query/base.html
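A sketch of wiring SelfQueryRetriever.from_llm over a Chroma store; the metadata fields, documents, and backing packages (chromadb, lark, OpenAI) are illustrative assumptions.

```python
# Sketch of SelfQueryRetriever over a Chroma store; field names and data are illustrative.
from langchain.chains.query_constructor.schema import AttributeInfo
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.schema import Document
from langchain.vectorstores import Chroma

docs = [
    Document(page_content="A sci-fi film about dreams", metadata={"year": 2010, "rating": 8.8}),
    Document(page_content="A romantic comedy set in Paris", metadata={"year": 2001, "rating": 7.2}),
]
vectorstore = Chroma.from_documents(docs, OpenAIEmbeddings())

metadata_field_info = [
    AttributeInfo(name="year", description="Release year", type="integer"),
    AttributeInfo(name="rating", description="Critic rating 1-10", type="float"),
]
retriever = SelfQueryRetriever.from_llm(
    llm=OpenAI(temperature=0),
    vectorstore=vectorstore,
    document_contents="Brief summary of a movie",
    metadata_field_info=metadata_field_info,
)
# The LLM turns this into a structured query with a filter on the year metadata field.
results = retriever.get_relevant_documents("movies about dreams released after 2005")
```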
9ceb96d60ac2-0
Source code for langchain.tools.base """Base implementation for tools or skills.""" from __future__ import annotations import warnings from abc import ABC, abstractmethod from inspect import signature from typing import Any, Awaitable, Callable, Dict, Optional, Tuple, Type, Union from pydantic import ( BaseModel, Extra, Field, create_model, root_validator, validate_arguments, ) from pydantic.main import ModelMetaclass from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import ( AsyncCallbackManager, AsyncCallbackManagerForToolRun, CallbackManager, CallbackManagerForToolRun, Callbacks, ) class SchemaAnnotationError(TypeError): """Raised when 'args_schema' is missing or has an incorrect type annotation.""" class ToolMetaclass(ModelMetaclass): """Metaclass for BaseTool to ensure the provided args_schema doesn't silently ignored.""" def __new__( cls: Type[ToolMetaclass], name: str, bases: Tuple[Type, ...], dct: dict ) -> ToolMetaclass: """Create the definition of the new tool class.""" schema_type: Optional[Type[BaseModel]] = dct.get("args_schema") if schema_type is not None: schema_annotations = dct.get("__annotations__", {}) args_schema_type = schema_annotations.get("args_schema", None) if args_schema_type is None or args_schema_type == BaseModel: # Throw errors for common mis-annotations. # TODO: Use get_args / get_origin and fully # specify valid annotations. typehint_mandate = """ class ChildTool(BaseTool): ... args_schema: Type[BaseModel] = SchemaClass ..."""
https://python.langchain.com/en/latest/_modules/langchain/tools/base.html
9ceb96d60ac2-1
... args_schema: Type[BaseModel] = SchemaClass ...""" raise SchemaAnnotationError( f"Tool definition for {name} must include valid type annotations" f" for argument 'args_schema' to behave as expected.\n" f"Expected annotation of 'Type[BaseModel]'" f" but got '{args_schema_type}'.\n" f"Expected class looks like:\n" f"{typehint_mandate}" ) # Pass through to Pydantic's metaclass return super().__new__(cls, name, bases, dct) def _create_subset_model( name: str, model: BaseModel, field_names: list ) -> Type[BaseModel]: """Create a pydantic model with only a subset of model's fields.""" fields = { field_name: ( model.__fields__[field_name].type_, model.__fields__[field_name].default, ) for field_name in field_names if field_name in model.__fields__ } return create_model(name, **fields) # type: ignore def get_filtered_args( inferred_model: Type[BaseModel], func: Callable, ) -> dict: """Get the arguments from a function's signature.""" schema = inferred_model.schema()["properties"] valid_keys = signature(func).parameters return {k: schema[k] for k in valid_keys if k != "run_manager"} class _SchemaConfig: """Configuration for the pydantic model.""" extra = Extra.forbid arbitrary_types_allowed = True def create_schema_from_function( model_name: str, func: Callable, ) -> Type[BaseModel]:
https://python.langchain.com/en/latest/_modules/langchain/tools/base.html
9ceb96d60ac2-2
model_name: str, func: Callable, ) -> Type[BaseModel]: """Create a pydantic schema from a function's signature.""" validated = validate_arguments(func, config=_SchemaConfig) # type: ignore inferred_model = validated.model # type: ignore if "run_manager" in inferred_model.__fields__: del inferred_model.__fields__["run_manager"] # Pydantic adds placeholder virtual fields we need to strip filtered_args = get_filtered_args(inferred_model, func) return _create_subset_model( f"{model_name}Schema", inferred_model, list(filtered_args) ) [docs]class BaseTool(ABC, BaseModel, metaclass=ToolMetaclass): """Interface LangChain tools must implement.""" name: str """The unique name of the tool that clearly communicates its purpose.""" description: str """Used to tell the model how/when/why to use the tool. You can provide few-shot examples as a part of the description. """ args_schema: Optional[Type[BaseModel]] = None """Pydantic model class to validate and parse the tool's input arguments.""" return_direct: bool = False """Whether to return the tool's output directly. Setting this to True means that after the tool is called, the AgentExecutor will stop looping. """ verbose: bool = False """Whether to log the tool's progress.""" callbacks: Callbacks = Field(default=None, exclude=True) """Callbacks to be called during tool execution.""" callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) """Deprecated. Please use callbacks instead.""" class Config:
https://python.langchain.com/en/latest/_modules/langchain/tools/base.html
9ceb96d60ac2-3
"""Deprecated. Please use callbacks instead.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def is_single_input(self) -> bool: """Whether the tool only accepts a single input.""" keys = {k for k in self.args if k != "kwargs"} return len(keys) == 1 @property def args(self) -> dict: if self.args_schema is not None: return self.args_schema.schema()["properties"] else: schema = create_schema_from_function(self.name, self._run) return schema.schema()["properties"] def _parse_input( self, tool_input: Union[str, Dict], ) -> Union[str, Dict[str, Any]]: """Convert tool input to pydantic model.""" input_args = self.args_schema if isinstance(tool_input, str): if input_args is not None: key_ = next(iter(input_args.__fields__.keys())) input_args.validate({key_: tool_input}) return tool_input else: if input_args is not None: result = input_args.parse_obj(tool_input) return {k: v for k, v in result.dict().items() if k in tool_input} return tool_input @root_validator() def raise_deprecation(cls, values: Dict) -> Dict: """Raise deprecation warning if callback_manager is used.""" if values.get("callback_manager") is not None: warnings.warn( "callback_manager is deprecated. Please use callbacks instead.", DeprecationWarning, ) values["callbacks"] = values.pop("callback_manager", None) return values
https://python.langchain.com/en/latest/_modules/langchain/tools/base.html
9ceb96d60ac2-4
values["callbacks"] = values.pop("callback_manager", None) return values @abstractmethod def _run( self, *args: Any, **kwargs: Any, ) -> Any: """Use the tool. Add run_manager: Optional[CallbackManagerForToolRun] = None to child implementations to enable tracing, """ @abstractmethod async def _arun( self, *args: Any, **kwargs: Any, ) -> Any: """Use the tool asynchronously. Add run_manager: Optional[AsyncCallbackManagerForToolRun] = None to child implementations to enable tracing, """ def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]: # For backwards compatibility, if run_input is a string, # pass as a positional argument. if isinstance(tool_input, str): return (tool_input,), {} else: return (), tool_input [docs] def run( self, tool_input: Union[str, Dict], verbose: Optional[bool] = None, start_color: Optional[str] = "green", color: Optional[str] = "green", callbacks: Callbacks = None, **kwargs: Any, ) -> Any: """Run the tool.""" parsed_input = self._parse_input(tool_input) if not self.verbose and verbose is not None: verbose_ = verbose else: verbose_ = self.verbose callback_manager = CallbackManager.configure( callbacks, self.callbacks, verbose=verbose_ ) # TODO: maybe also pass through run_manager is _run supports kwargs
https://python.langchain.com/en/latest/_modules/langchain/tools/base.html
9ceb96d60ac2-5
) # TODO: maybe also pass through run_manager is _run supports kwargs new_arg_supported = signature(self._run).parameters.get("run_manager") run_manager = callback_manager.on_tool_start( {"name": self.name, "description": self.description}, tool_input if isinstance(tool_input, str) else str(tool_input), color=start_color, **kwargs, ) try: tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input) observation = ( self._run(*tool_args, run_manager=run_manager, **tool_kwargs) if new_arg_supported else self._run(*tool_args, **tool_kwargs) ) except (Exception, KeyboardInterrupt) as e: run_manager.on_tool_error(e) raise e run_manager.on_tool_end(str(observation), color=color, name=self.name, **kwargs) return observation [docs] async def arun( self, tool_input: Union[str, Dict], verbose: Optional[bool] = None, start_color: Optional[str] = "green", color: Optional[str] = "green", callbacks: Callbacks = None, **kwargs: Any, ) -> Any: """Run the tool asynchronously.""" parsed_input = self._parse_input(tool_input) if not self.verbose and verbose is not None: verbose_ = verbose else: verbose_ = self.verbose callback_manager = AsyncCallbackManager.configure( callbacks, self.callbacks, verbose=verbose_ ) new_arg_supported = signature(self._arun).parameters.get("run_manager") run_manager = await callback_manager.on_tool_start(
https://python.langchain.com/en/latest/_modules/langchain/tools/base.html
9ceb96d60ac2-6
run_manager = await callback_manager.on_tool_start( {"name": self.name, "description": self.description}, tool_input if isinstance(tool_input, str) else str(tool_input), color=start_color, **kwargs, ) try: # We then call the tool on the tool input to get an observation tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input) observation = ( await self._arun(*tool_args, run_manager=run_manager, **tool_kwargs) if new_arg_supported else await self._arun(*tool_args, **tool_kwargs) ) except (Exception, KeyboardInterrupt) as e: await run_manager.on_tool_error(e) raise e await run_manager.on_tool_end( str(observation), color=color, name=self.name, **kwargs ) return observation def __call__(self, tool_input: str, callbacks: Callbacks = None) -> str: """Make tool callable.""" return self.run(tool_input, callbacks=callbacks) [docs]class Tool(BaseTool): """Tool that takes in function or coroutine directly.""" description: str = "" func: Callable[..., str] """The function to run when the tool is called.""" coroutine: Optional[Callable[..., Awaitable[str]]] = None """The asynchronous version of the function.""" @property def args(self) -> dict: """The tool's input arguments.""" if self.args_schema is not None: return self.args_schema.schema()["properties"] # For backwards compatibility, if the function signature is ambiguous, # assume it takes a single string input. return {"tool_input": {"type": "string"}}
https://python.langchain.com/en/latest/_modules/langchain/tools/base.html
9ceb96d60ac2-7
return {"tool_input": {"type": "string"}} def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]: """Convert tool input to pydantic model.""" args, kwargs = super()._to_args_and_kwargs(tool_input) # For backwards compatibility. The tool must be run with a single input all_args = list(args) + list(kwargs.values()) if len(all_args) != 1: raise ValueError( f"Too many arguments to single-input tool {self.name}." f" Args: {all_args}" ) return tuple(all_args), {} def _run( self, *args: Any, run_manager: Optional[CallbackManagerForToolRun] = None, **kwargs: Any, ) -> Any: """Use the tool.""" new_argument_supported = signature(self.func).parameters.get("callbacks") return ( self.func( *args, callbacks=run_manager.get_child() if run_manager else None, **kwargs, ) if new_argument_supported else self.func(*args, **kwargs) ) async def _arun( self, *args: Any, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, **kwargs: Any, ) -> Any: """Use the tool asynchronously.""" if self.coroutine: new_argument_supported = signature(self.coroutine).parameters.get( "callbacks" ) return ( await self.coroutine( *args, callbacks=run_manager.get_child() if run_manager else None, **kwargs, )
https://python.langchain.com/en/latest/_modules/langchain/tools/base.html
9ceb96d60ac2-8
**kwargs, ) if new_argument_supported else await self.coroutine(*args, **kwargs) ) raise NotImplementedError("Tool does not support async") # TODO: this is for backwards compatibility, remove in future def __init__( self, name: str, func: Callable, description: str, **kwargs: Any ) -> None: """Initialize tool.""" super(Tool, self).__init__( name=name, func=func, description=description, **kwargs ) [docs] @classmethod def from_function( cls, func: Callable, name: str, # We keep these required to support backwards compatibility description: str, return_direct: bool = False, args_schema: Optional[Type[BaseModel]] = None, **kwargs: Any, ) -> Tool: """Initialize tool from a function.""" return cls( name=name, func=func, description=description, return_direct=return_direct, args_schema=args_schema, **kwargs, ) [docs]class StructuredTool(BaseTool): """Tool that can operate on any number of inputs.""" description: str = "" args_schema: Type[BaseModel] = Field(..., description="The tool schema.") """The input arguments' schema.""" func: Callable[..., Any] """The function to run when the tool is called.""" coroutine: Optional[Callable[..., Awaitable[Any]]] = None """The asynchronous version of the function.""" @property def args(self) -> dict: """The tool's input arguments.""" return self.args_schema.schema()["properties"] def _run(
https://python.langchain.com/en/latest/_modules/langchain/tools/base.html
9ceb96d60ac2-9
return self.args_schema.schema()["properties"] def _run( self, *args: Any, run_manager: Optional[CallbackManagerForToolRun] = None, **kwargs: Any, ) -> Any: """Use the tool.""" new_argument_supported = signature(self.func).parameters.get("callbacks") return ( self.func( *args, callbacks=run_manager.get_child() if run_manager else None, **kwargs, ) if new_argument_supported else self.func(*args, **kwargs) ) async def _arun( self, *args: Any, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, **kwargs: Any, ) -> str: """Use the tool asynchronously.""" if self.coroutine: new_argument_supported = signature(self.coroutine).parameters.get( "callbacks" ) return ( await self.coroutine( *args, callbacks=run_manager.get_child() if run_manager else None, **kwargs, ) if new_argument_supported else await self.coroutine(*args, **kwargs) ) raise NotImplementedError("Tool does not support async") [docs] @classmethod def from_function( cls, func: Callable, name: Optional[str] = None, description: Optional[str] = None, return_direct: bool = False, args_schema: Optional[Type[BaseModel]] = None, infer_schema: bool = True, **kwargs: Any, ) -> StructuredTool: name = name or func.__name__
https://python.langchain.com/en/latest/_modules/langchain/tools/base.html
9ceb96d60ac2-10
) -> StructuredTool: name = name or func.__name__ description = description or func.__doc__ assert ( description is not None ), "Function must have a docstring if description not provided." # Description example: # search_api(query: str) - Searches the API for the query. description = f"{name}{signature(func)} - {description.strip()}" _args_schema = args_schema if _args_schema is None and infer_schema: _args_schema = create_schema_from_function(f"{name}Schema", func) return cls( name=name, func=func, args_schema=_args_schema, description=description, return_direct=return_direct, **kwargs, ) [docs]def tool( *args: Union[str, Callable], return_direct: bool = False, args_schema: Optional[Type[BaseModel]] = None, infer_schema: bool = True, ) -> Callable: """Make tools out of functions, can be used with or without arguments. Args: *args: The arguments to the tool. return_direct: Whether to return directly from the tool rather than continuing the agent loop. args_schema: optional argument schema for user to specify infer_schema: Whether to infer the schema of the arguments from the function's signature. This also makes the resultant tool accept a dictionary input to its `run()` function. Requires: - Function must be of type (str) -> str - Function must have a docstring Examples: .. code-block:: python @tool def search_api(query: str) -> str: # Searches the API for the query.
https://python.langchain.com/en/latest/_modules/langchain/tools/base.html
9ceb96d60ac2-11
# Searches the API for the query. return @tool("search", return_direct=True) def search_api(query: str) -> str: # Searches the API for the query. return """ def _make_with_name(tool_name: str) -> Callable: def _make_tool(func: Callable) -> BaseTool: if infer_schema or args_schema is not None: return StructuredTool.from_function( func, name=tool_name, return_direct=return_direct, args_schema=args_schema, infer_schema=infer_schema, ) # If someone doesn't want a schema applied, we must treat it as # a simple string->string function assert func.__doc__ is not None, "Function must have a docstring" return Tool( name=tool_name, func=func, description=f"{tool_name} tool", return_direct=return_direct, ) return _make_tool if len(args) == 1 and isinstance(args[0], str): # if the argument is a string, then we use the string as the tool name # Example usage: @tool("search", return_direct=True) return _make_with_name(args[0]) elif len(args) == 1 and callable(args[0]): # if the argument is a function, then we use the function name as the tool name # Example usage: @tool return _make_with_name(args[0].__name__)(args[0]) elif len(args) == 0: # if there are no arguments, then we use the function name as the tool name # Example usage: @tool(return_direct=True)
https://python.langchain.com/en/latest/_modules/langchain/tools/base.html
9ceb96d60ac2-12
        # Example usage: @tool(return_direct=True)
        def _partial(func: Callable[[str], str]) -> BaseTool:
            return _make_with_name(func.__name__)(func)

        return _partial
    else:
        raise ValueError("Too many arguments for tool decorator")
https://python.langchain.com/en/latest/_modules/langchain/tools/base.html
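The ToolMetaclass above rejects subclasses that set args_schema without a Type[BaseModel] annotation. Below is a minimal sketch of a compliant custom tool; the CalculatorInput schema and AddTool name are made up for illustration.

```python
# Minimal custom tool sketch satisfying the args_schema annotation check above.
from typing import Optional, Type

from pydantic import BaseModel, Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool


class CalculatorInput(BaseModel):
    """Hypothetical input schema for the example tool."""

    a: int = Field(..., description="First operand")
    b: int = Field(..., description="Second operand")


class AddTool(BaseTool):
    name = "adder"
    description = "Adds two integers. Input must provide 'a' and 'b'."
    # Note the explicit Type[BaseModel] annotation the metaclass insists on.
    args_schema: Type[BaseModel] = CalculatorInput

    def _run(
        self, a: int, b: int, run_manager: Optional[CallbackManagerForToolRun] = None
    ) -> str:
        return str(a + b)

    async def _arun(
        self, a: int, b: int, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
    ) -> str:
        return str(a + b)


# AddTool().run({"a": 2, "b": 3}) returns "5"
```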
330adb61837c-0
Source code for langchain.tools.plugin from __future__ import annotations import json from typing import Optional, Type import requests import yaml from pydantic import BaseModel from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool class ApiConfig(BaseModel): type: str url: str has_user_authentication: Optional[bool] = False class AIPlugin(BaseModel): """AI Plugin Definition.""" schema_version: str name_for_model: str name_for_human: str description_for_model: str description_for_human: str auth: Optional[dict] = None api: ApiConfig logo_url: Optional[str] contact_email: Optional[str] legal_info_url: Optional[str] @classmethod def from_url(cls, url: str) -> AIPlugin: """Instantiate AIPlugin from a URL.""" response = requests.get(url).json() return cls(**response) def marshal_spec(txt: str) -> dict: """Convert the yaml or json serialized spec to a dict.""" try: return json.loads(txt) except json.JSONDecodeError: return yaml.safe_load(txt) class AIPluginToolSchema(BaseModel): """AIPLuginToolSchema.""" tool_input: Optional[str] = "" [docs]class AIPluginTool(BaseTool): plugin: AIPlugin api_spec: str args_schema: Type[AIPluginToolSchema] = AIPluginToolSchema [docs] @classmethod def from_plugin_url(cls, url: str) -> AIPluginTool: plugin = AIPlugin.from_url(url) description = (
https://python.langchain.com/en/latest/_modules/langchain/tools/plugin.html
330adb61837c-1
        plugin = AIPlugin.from_url(url)
        description = (
            f"Call this tool to get the OpenAPI spec (and usage guide) "
            f"for interacting with the {plugin.name_for_human} API. "
            f"You should only call this ONCE! What is the "
            f"{plugin.name_for_human} API useful for? "
        ) + plugin.description_for_human
        open_api_spec_str = requests.get(plugin.api.url).text
        open_api_spec = marshal_spec(open_api_spec_str)
        api_spec = (
            f"Usage Guide: {plugin.description_for_model}\n\n"
            f"OpenAPI Spec: {open_api_spec}"
        )
        return cls(
            name=plugin.name_for_model,
            description=description,
            plugin=plugin,
            api_spec=api_spec,
        )

    def _run(
        self,
        tool_input: Optional[str] = "",
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return self.api_spec

    async def _arun(
        self,
        tool_input: Optional[str] = None,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        return self.api_spec
https://python.langchain.com/en/latest/_modules/langchain/tools/plugin.html
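A hedged sketch of loading an AI plugin manifest into the tool; the manifest URL is a placeholder, not a real endpoint.

```python
# Load an AI plugin manifest and expose it as a tool; the URL is hypothetical.
from langchain.tools.plugin import AIPluginTool

tool = AIPluginTool.from_plugin_url(
    "https://example.com/.well-known/ai-plugin.json"  # placeholder manifest URL
)
# Running the tool simply returns the cached usage guide plus OpenAPI spec string.
spec_text = tool.run("")
```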
f9c670adeca4-0
Source code for langchain.tools.ifttt """From https://github.com/SidU/teams-langchain-js/wiki/Connecting-IFTTT-Services. # Creating a webhook - Go to https://ifttt.com/create # Configuring the "If This" - Click on the "If This" button in the IFTTT interface. - Search for "Webhooks" in the search bar. - Choose the first option for "Receive a web request with a JSON payload." - Choose an Event Name that is specific to the service you plan to connect to. This will make it easier for you to manage the webhook URL. For example, if you're connecting to Spotify, you could use "Spotify" as your Event Name. - Click the "Create Trigger" button to save your settings and create your webhook. # Configuring the "Then That" - Tap on the "Then That" button in the IFTTT interface. - Search for the service you want to connect, such as Spotify. - Choose an action from the service, such as "Add track to a playlist". - Configure the action by specifying the necessary details, such as the playlist name, e.g., "Songs from AI". - Reference the JSON Payload received by the Webhook in your action. For the Spotify scenario, choose "{{JsonPayload}}" as your search query. - Tap the "Create Action" button to save your action settings. - Once you have finished configuring your action, click the "Finish" button to complete the setup. - Congratulations! You have successfully connected the Webhook to the desired service, and you're ready to start receiving data and triggering actions 🎉 # Finishing up - To get your webhook URL go to https://ifttt.com/maker_webhooks/settings
https://python.langchain.com/en/latest/_modules/langchain/tools/ifttt.html
f9c670adeca4-1
- To get your webhook URL go to https://ifttt.com/maker_webhooks/settings
- Copy the IFTTT key value from there. The URL is of the form
https://maker.ifttt.com/use/YOUR_IFTTT_KEY. Grab the YOUR_IFTTT_KEY value.
"""
from typing import Optional

import requests

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool


class IFTTTWebhook(BaseTool):
    """IFTTT Webhook.

    Args:
        name: name of the tool
        description: description of the tool
        url: url to hit with the json event.
    """

    url: str

    def _run(
        self,
        tool_input: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        body = {"this": tool_input}
        response = requests.post(self.url, data=body)
        return response.text

    async def _arun(
        self,
        tool_input: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        raise NotImplementedError("Not implemented.")
https://python.langchain.com/en/latest/_modules/langchain/tools/ifttt.html
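An illustrative IFTTTWebhook setup; the event name and the maker-webhook trigger URL shape are assumptions; substitute the URL from your own IFTTT webhook settings.

```python
# Illustrative IFTTT webhook tool; key, event name, and URL shape are placeholders.
import os

from langchain.tools.ifttt import IFTTTWebhook

key = os.environ.get("IFTTT_KEY", "YOUR_IFTTT_KEY")
url = f"https://maker.ifttt.com/trigger/spotify/json/with/key/{key}"  # assumed trigger URL form
tool = IFTTTWebhook(
    name="Spotify",
    description="Add a track to a Spotify playlist via IFTTT.",
    url=url,
)
# Posts {"this": "taylor swift"} to the webhook and returns the response body.
print(tool.run("taylor swift"))
```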
3ddde9079b6e-0
Source code for langchain.tools.wikipedia.tool

"""Tool for the Wikipedia API."""

from typing import Optional

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.wikipedia import WikipediaAPIWrapper


class WikipediaQueryRun(BaseTool):
    """Tool that adds the capability to search using the Wikipedia API."""

    name = "Wikipedia"
    description = (
        "A wrapper around Wikipedia. "
        "Useful for when you need to answer general questions about "
        "people, places, companies, facts, historical events, or other subjects. "
        "Input should be a search query."
    )
    api_wrapper: WikipediaAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Wikipedia tool."""
        return self.api_wrapper.run(query)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Wikipedia tool asynchronously."""
        raise NotImplementedError("WikipediaQueryRun does not support async")
https://python.langchain.com/en/latest/_modules/langchain/tools/wikipedia/tool.html
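A direct usage sketch for the Wikipedia tool; it assumes the `wikipedia` package is installed for WikipediaAPIWrapper.

```python
# Straightforward usage of the Wikipedia tool; requires `pip install wikipedia`.
from langchain.tools.wikipedia.tool import WikipediaQueryRun
from langchain.utilities.wikipedia import WikipediaAPIWrapper

wiki = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
print(wiki.run("LangChain"))
```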
8af3e3efb5cc-0
Source code for langchain.tools.shell.tool import asyncio import platform import warnings from typing import List, Optional, Type, Union from pydantic import BaseModel, Field, root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.utilities.bash import BashProcess class ShellInput(BaseModel): """Commands for the Bash Shell tool.""" commands: Union[str, List[str]] = Field( ..., description="List of shell commands to run. Deserialized using json.loads", ) """List of shell commands to run.""" @root_validator def _validate_commands(cls, values: dict) -> dict: """Validate commands.""" # TODO: Add real validators commands = values.get("commands") if not isinstance(commands, list): values["commands"] = [commands] # Warn that the bash tool is not safe warnings.warn( "The shell tool has no safeguards by default. Use at your own risk." ) return values def _get_default_bash_processs() -> BashProcess: """Get file path from string.""" return BashProcess(return_err_output=True) def _get_platform() -> str: """Get platform.""" system = platform.system() if system == "Darwin": return "MacOS" return system [docs]class ShellTool(BaseTool): """Tool to run shell commands.""" process: BashProcess = Field(default_factory=_get_default_bash_processs) """Bash process to run commands.""" name: str = "terminal" """Name of tool."""
https://python.langchain.com/en/latest/_modules/langchain/tools/shell/tool.html
8af3e3efb5cc-1
name: str = "terminal" """Name of tool.""" description: str = f"Run shell commands on this {_get_platform()} machine." """Description of tool.""" args_schema: Type[BaseModel] = ShellInput """Schema for input arguments.""" def _run( self, commands: Union[str, List[str]], run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Run commands and return final output.""" return self.process.run(commands) async def _arun( self, commands: Union[str, List[str]], run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Run commands asynchronously and return final output.""" return await asyncio.get_event_loop().run_in_executor( None, self.process.run, commands ) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
https://python.langchain.com/en/latest/_modules/langchain/tools/shell/tool.html
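A usage sketch for ShellTool with structured input; the commands are examples, and the tool executes them without safeguards.

```python
# Sketch of the shell tool; it executes arbitrary commands, so treat it as unsafe by default.
from langchain.tools.shell.tool import ShellTool

shell = ShellTool()
# Structured input: a dict matching ShellInput, with one or more commands.
output = shell.run({"commands": ["echo 'Hello from ShellTool'", "pwd"]})
print(output)
```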
51db1c733e70-0
Source code for langchain.tools.zapier.tool """## Zapier Natural Language Actions API \ Full docs here: https://nla.zapier.com/api/v1/docs **Zapier Natural Language Actions** gives you access to the 5k+ apps, 20k+ actions on Zapier's platform through a natural language API interface. NLA supports apps like Gmail, Salesforce, Trello, Slack, Asana, HubSpot, Google Sheets, Microsoft Teams, and thousands more apps: https://zapier.com/apps Zapier NLA handles ALL the underlying API auth and translation from natural language --> underlying API call --> return simplified output for LLMs The key idea is you, or your users, expose a set of actions via an oauth-like setup window, which you can then query and execute via a REST API. NLA offers both API Key and OAuth for signing NLA API requests. 1. Server-side (API Key): for quickly getting started, testing, and production scenarios where LangChain will only use actions exposed in the developer's Zapier account (and will use the developer's connected accounts on Zapier.com) 2. User-facing (Oauth): for production scenarios where you are deploying an end-user facing application and LangChain needs access to end-user's exposed actions and connected accounts on Zapier.com This quick start will focus on the server-side use case for brevity. Review [full docs](https://nla.zapier.com/api/v1/docs) or reach out to nla@zapier.com for user-facing oauth developer support. Typically you'd use SequentialChain, here's a basic example: 1. Use NLA to find an email in Gmail 2. Use LLMChain to generate a draft reply to (1)
https://python.langchain.com/en/latest/_modules/langchain/tools/zapier/tool.html
51db1c733e70-1
2. Use LLMChain to generate a draft reply to (1) 3. Use NLA to send the draft reply (2) to someone in Slack via direct message In code, below: ```python import os # get from https://platform.openai.com/ os.environ["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY", "") # get from https://nla.zapier.com/demo/provider/debug # (under User Information, after logging in): os.environ["ZAPIER_NLA_API_KEY"] = os.environ.get("ZAPIER_NLA_API_KEY", "") from langchain.llms import OpenAI from langchain.agents import initialize_agent from langchain.agents.agent_toolkits import ZapierToolkit from langchain.utilities.zapier import ZapierNLAWrapper ## step 0. expose gmail 'find email' and slack 'send channel message' actions # first go here, log in, expose (enable) the two actions: # https://nla.zapier.com/demo/start # -- for this example, can leave all fields "Have AI guess" # in an oauth scenario, you'd get your own <provider> id (instead of 'demo') # which you route your users through first llm = OpenAI(temperature=0) zapier = ZapierNLAWrapper() ## To leverage a nla_oauth_access_token you may pass the value to the ZapierNLAWrapper ## If you do this there is no need to initialize the ZAPIER_NLA_API_KEY env variable # zapier = ZapierNLAWrapper(zapier_nla_oauth_access_token="TOKEN_HERE") toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier) agent = initialize_agent( toolkit.get_tools(), llm,
https://python.langchain.com/en/latest/_modules/langchain/tools/zapier/tool.html
51db1c733e70-2
agent = initialize_agent( toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True ) agent.run(("Summarize the last email I received regarding Silicon Valley Bank. " "Send the summary to the #test-zapier channel in slack.")) ``` """ from typing import Any, Dict, Optional from pydantic import Field, root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.zapier.prompt import BASE_ZAPIER_TOOL_PROMPT from langchain.utilities.zapier import ZapierNLAWrapper [docs]class ZapierNLARunAction(BaseTool): """ Args: action_id: a specific action ID (from list actions) of the action to execute (the set api_key must be associated with the action owner) instructions: a natural language instruction string for using the action (eg. "get the latest email from Mike Knoop" for "Gmail: find email" action) params: a dict, optional. Any params provided will *override* AI guesses from `instructions` (see "understanding the AI guessing flow" here: https://nla.zapier.com/api/v1/docs) """ api_wrapper: ZapierNLAWrapper = Field(default_factory=ZapierNLAWrapper) action_id: str params: Optional[dict] = None base_prompt: str = BASE_ZAPIER_TOOL_PROMPT zapier_description: str params_schema: Dict[str, str] = Field(default_factory=dict) name = "" description = "" @root_validator
https://python.langchain.com/en/latest/_modules/langchain/tools/zapier/tool.html
51db1c733e70-3
name = "" description = "" @root_validator def set_name_description(cls, values: Dict[str, Any]) -> Dict[str, Any]: zapier_description = values["zapier_description"] params_schema = values["params_schema"] if "instructions" in params_schema: del params_schema["instructions"] # Ensure base prompt (if overrided) contains necessary input fields necessary_fields = {"{zapier_description}", "{params}"} if not all(field in values["base_prompt"] for field in necessary_fields): raise ValueError( "Your custom base Zapier prompt must contain input fields for " "{zapier_description} and {params}." ) values["name"] = zapier_description values["description"] = values["base_prompt"].format( zapier_description=zapier_description, params=str(list(params_schema.keys())), ) return values def _run( self, instructions: str, run_manager: Optional[CallbackManagerForToolRun] = None ) -> str: """Use the Zapier NLA tool to return a list of all exposed user actions.""" return self.api_wrapper.run_as_str(self.action_id, instructions, self.params) async def _arun( self, _: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the Zapier NLA tool to return a list of all exposed user actions.""" raise NotImplementedError("ZapierNLAListActions does not support async") ZapierNLARunAction.__doc__ = ( ZapierNLAWrapper.run.__doc__ + ZapierNLARunAction.__doc__ # type: ignore )
https://python.langchain.com/en/latest/_modules/langchain/tools/zapier/tool.html
51db1c733e70-4
)


# other useful actions
class ZapierNLAListActions(BaseTool):
    """
    Args:
        None
    """

    name = "Zapier NLA: List Actions"
    description = BASE_ZAPIER_TOOL_PROMPT + (
        "This tool returns a list of the user's exposed actions."
    )
    api_wrapper: ZapierNLAWrapper = Field(default_factory=ZapierNLAWrapper)

    def _run(
        self,
        _: str = "",
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Zapier NLA tool to return a list of all exposed user actions."""
        return self.api_wrapper.list_as_str()

    async def _arun(
        self,
        _: str = "",
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Zapier NLA tool to return a list of all exposed user actions."""
        raise NotImplementedError("ZapierNLAListActions does not support async")


ZapierNLAListActions.__doc__ = (
    ZapierNLAWrapper.list.__doc__ + ZapierNLAListActions.__doc__  # type: ignore
)
https://python.langchain.com/en/latest/_modules/langchain/tools/zapier/tool.html
c646ad0b9fa5-0
Source code for langchain.tools.google_places.tool

"""Tool for the Google Places API."""

from typing import Optional

from pydantic import BaseModel, Field

from langchain.callbacks.manager import (
    AsyncCallbackManagerForToolRun,
    CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool
from langchain.utilities.google_places_api import GooglePlacesAPIWrapper


class GooglePlacesSchema(BaseModel):
    query: str = Field(..., description="Query for Google Maps")


class GooglePlacesTool(BaseTool):
    """Tool that adds the capability to query the Google Places API."""

    name = "Google Places"
    description = (
        "A wrapper around Google Places. "
        "Useful for when you need to validate or "
        "discover addresses from ambiguous text. "
        "Input should be a search query."
    )
    api_wrapper: GooglePlacesAPIWrapper = Field(default_factory=GooglePlacesAPIWrapper)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return self.api_wrapper.run(query)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("GooglePlacesRun does not support async")
https://python.langchain.com/en/latest/_modules/langchain/tools/google_places/tool.html
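An illustrative GooglePlacesTool call; it assumes the `googlemaps` package and a GPLACES_API_KEY environment variable for the underlying wrapper.

```python
# Illustrative Google Places tool usage; assumes `pip install googlemaps` and a
# GPLACES_API_KEY environment variable for GooglePlacesAPIWrapper.
from langchain.tools.google_places.tool import GooglePlacesTool

places = GooglePlacesTool()
print(places.run("coffee shops near Times Square, New York"))
```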
76b92547fcbf-0
Source code for langchain.tools.azure_cognitive_services.image_analysis from __future__ import annotations import logging from typing import Any, Dict, Optional from pydantic import root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.azure_cognitive_services.utils import detect_file_src_type from langchain.tools.base import BaseTool from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class AzureCogsImageAnalysisTool(BaseTool): """Tool that queries the Azure Cognitive Services Image Analysis API. In order to set this up, follow instructions at: https://learn.microsoft.com/en-us/azure/cognitive-services/computer-vision/quickstarts-sdk/image-analysis-client-library-40 """ azure_cogs_key: str = "" #: :meta private: azure_cogs_endpoint: str = "" #: :meta private: vision_service: Any #: :meta private: analysis_options: Any #: :meta private: name = "Azure Cognitive Services Image Analysis" description = ( "A wrapper around Azure Cognitive Services Image Analysis. " "Useful for when you need to analyze images. " "Input should be a url to an image." ) @root_validator(pre=True) def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and endpoint exists in environment.""" azure_cogs_key = get_from_dict_or_env( values, "azure_cogs_key", "AZURE_COGS_KEY" ) azure_cogs_endpoint = get_from_dict_or_env( values, "azure_cogs_endpoint", "AZURE_COGS_ENDPOINT" )
https://python.langchain.com/en/latest/_modules/langchain/tools/azure_cognitive_services/image_analysis.html
76b92547fcbf-1
values, "azure_cogs_endpoint", "AZURE_COGS_ENDPOINT" ) try: import azure.ai.vision as sdk values["vision_service"] = sdk.VisionServiceOptions( endpoint=azure_cogs_endpoint, key=azure_cogs_key ) values["analysis_options"] = sdk.ImageAnalysisOptions() values["analysis_options"].features = ( sdk.ImageAnalysisFeature.CAPTION | sdk.ImageAnalysisFeature.OBJECTS | sdk.ImageAnalysisFeature.TAGS | sdk.ImageAnalysisFeature.TEXT ) except ImportError: raise ImportError( "azure-ai-vision is not installed. " "Run `pip install azure-ai-vision` to install." ) return values def _image_analysis(self, image_path: str) -> Dict: try: import azure.ai.vision as sdk except ImportError: pass image_src_type = detect_file_src_type(image_path) if image_src_type == "local": vision_source = sdk.VisionSource(filename=image_path) elif image_src_type == "remote": vision_source = sdk.VisionSource(url=image_path) else: raise ValueError(f"Invalid image path: {image_path}") image_analyzer = sdk.ImageAnalyzer( self.vision_service, vision_source, self.analysis_options ) result = image_analyzer.analyze() res_dict = {} if result.reason == sdk.ImageAnalysisResultReason.ANALYZED: if result.caption is not None: res_dict["caption"] = result.caption.content if result.objects is not None: res_dict["objects"] = [obj.name for obj in result.objects] if result.tags is not None:
https://python.langchain.com/en/latest/_modules/langchain/tools/azure_cognitive_services/image_analysis.html
76b92547fcbf-2
if result.tags is not None: res_dict["tags"] = [tag.name for tag in result.tags] if result.text is not None: res_dict["text"] = [line.content for line in result.text.lines] else: error_details = sdk.ImageAnalysisErrorDetails.from_result(result) raise RuntimeError( f"Image analysis failed.\n" f"Reason: {error_details.reason}\n" f"Details: {error_details.message}" ) return res_dict def _format_image_analysis_result(self, image_analysis_result: Dict) -> str: formatted_result = [] if "caption" in image_analysis_result: formatted_result.append("Caption: " + image_analysis_result["caption"]) if ( "objects" in image_analysis_result and len(image_analysis_result["objects"]) > 0 ): formatted_result.append( "Objects: " + ", ".join(image_analysis_result["objects"]) ) if "tags" in image_analysis_result and len(image_analysis_result["tags"]) > 0: formatted_result.append("Tags: " + ", ".join(image_analysis_result["tags"])) if "text" in image_analysis_result and len(image_analysis_result["text"]) > 0: formatted_result.append("Text: " + ", ".join(image_analysis_result["text"])) return "\n".join(formatted_result) def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" try: image_analysis_result = self._image_analysis(query) if not image_analysis_result: return "No good image analysis result was found"
https://python.langchain.com/en/latest/_modules/langchain/tools/azure_cognitive_services/image_analysis.html
76b92547fcbf-3
            if not image_analysis_result:
                return "No good image analysis result was found"
            return self._format_image_analysis_result(image_analysis_result)
        except Exception as e:
            raise RuntimeError(f"Error while running AzureCogsImageAnalysisTool: {e}")

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("AzureCogsImageAnalysisTool does not support async")
https://python.langchain.com/en/latest/_modules/langchain/tools/azure_cognitive_services/image_analysis.html
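A minimal usage sketch for the image analysis tool above, assuming `azure-ai-vision` is installed and that the key, endpoint, and image URL shown are placeholders rather than real values:

import os

from langchain.tools.azure_cognitive_services import AzureCogsImageAnalysisTool

# Placeholder credentials; the validator reads AZURE_COGS_KEY / AZURE_COGS_ENDPOINT
# from the environment when they are not passed to the constructor.
os.environ["AZURE_COGS_KEY"] = "<your-cognitive-services-key>"
os.environ["AZURE_COGS_ENDPOINT"] = "<your-cognitive-services-endpoint>"

image_tool = AzureCogsImageAnalysisTool()

# Input is a URL (or local path) to an image; the result is a formatted string
# with the caption, objects, tags, and any OCR text found in the image.
print(image_tool.run("https://example.com/sample-photo.jpg"))  # hypothetical URL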
65ddd3c1b66c-0
Source code for langchain.tools.azure_cognitive_services.form_recognizer from __future__ import annotations import logging from typing import Any, Dict, List, Optional from pydantic import root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.azure_cognitive_services.utils import detect_file_src_type from langchain.tools.base import BaseTool from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class AzureCogsFormRecognizerTool(BaseTool): """Tool that queries the Azure Cognitive Services Form Recognizer API. In order to set this up, follow instructions at: https://learn.microsoft.com/en-us/azure/applied-ai-services/form-recognizer/quickstarts/get-started-sdks-rest-api?view=form-recog-3.0.0&pivots=programming-language-python """ azure_cogs_key: str = "" #: :meta private: azure_cogs_endpoint: str = "" #: :meta private: doc_analysis_client: Any #: :meta private: name = "Azure Cognitive Services Form Recognizer" description = ( "A wrapper around Azure Cognitive Services Form Recognizer. " "Useful for when you need to " "extract text, tables, and key-value pairs from documents. " "Input should be a url to a document." ) @root_validator(pre=True) def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and endpoint exists in environment.""" azure_cogs_key = get_from_dict_or_env( values, "azure_cogs_key", "AZURE_COGS_KEY" )
https://python.langchain.com/en/latest/_modules/langchain/tools/azure_cognitive_services/form_recognizer.html
65ddd3c1b66c-1
values, "azure_cogs_key", "AZURE_COGS_KEY" ) azure_cogs_endpoint = get_from_dict_or_env( values, "azure_cogs_endpoint", "AZURE_COGS_ENDPOINT" ) try: from azure.ai.formrecognizer import DocumentAnalysisClient from azure.core.credentials import AzureKeyCredential values["doc_analysis_client"] = DocumentAnalysisClient( endpoint=azure_cogs_endpoint, credential=AzureKeyCredential(azure_cogs_key), ) except ImportError: raise ImportError( "azure-ai-formrecognizer is not installed. " "Run `pip install azure-ai-formrecognizer` to install." ) return values def _parse_tables(self, tables: List[Any]) -> List[Any]: result = [] for table in tables: rc, cc = table.row_count, table.column_count _table = [["" for _ in range(cc)] for _ in range(rc)] for cell in table.cells: _table[cell.row_index][cell.column_index] = cell.content result.append(_table) return result def _parse_kv_pairs(self, kv_pairs: List[Any]) -> List[Any]: result = [] for kv_pair in kv_pairs: key = kv_pair.key.content if kv_pair.key else "" value = kv_pair.value.content if kv_pair.value else "" result.append((key, value)) return result def _document_analysis(self, document_path: str) -> Dict: document_src_type = detect_file_src_type(document_path) if document_src_type == "local": with open(document_path, "rb") as document:
https://python.langchain.com/en/latest/_modules/langchain/tools/azure_cognitive_services/form_recognizer.html
65ddd3c1b66c-2
with open(document_path, "rb") as document: poller = self.doc_analysis_client.begin_analyze_document( "prebuilt-document", document ) elif document_src_type == "remote": poller = self.doc_analysis_client.begin_analyze_document_from_url( "prebuilt-document", document_path ) else: raise ValueError(f"Invalid document path: {document_path}") result = poller.result() res_dict = {} if result.content is not None: res_dict["content"] = result.content if result.tables is not None: res_dict["tables"] = self._parse_tables(result.tables) if result.key_value_pairs is not None: res_dict["key_value_pairs"] = self._parse_kv_pairs(result.key_value_pairs) return res_dict def _format_document_analysis_result(self, document_analysis_result: Dict) -> str: formatted_result = [] if "content" in document_analysis_result: formatted_result.append( f"Content: {document_analysis_result['content']}".replace("\n", " ") ) if "tables" in document_analysis_result: for i, table in enumerate(document_analysis_result["tables"]): formatted_result.append(f"Table {i}: {table}".replace("\n", " ")) if "key_value_pairs" in document_analysis_result: for kv_pair in document_analysis_result["key_value_pairs"]: formatted_result.append( f"{kv_pair[0]}: {kv_pair[1]}".replace("\n", " ") ) return "\n".join(formatted_result) def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None,
https://python.langchain.com/en/latest/_modules/langchain/tools/azure_cognitive_services/form_recognizer.html
65ddd3c1b66c-3
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            document_analysis_result = self._document_analysis(query)
            if not document_analysis_result:
                return "No good document analysis result was found"
            return self._format_document_analysis_result(document_analysis_result)
        except Exception as e:
            raise RuntimeError(f"Error while running AzureCogsFormRecognizerTool: {e}")

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("AzureCogsFormRecognizerTool does not support async")
https://python.langchain.com/en/latest/_modules/langchain/tools/azure_cognitive_services/form_recognizer.html
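A sketch of direct use of the Form Recognizer tool, assuming `azure-ai-formrecognizer` is installed; the credentials and document URL are placeholders. Credentials can be passed to the constructor instead of relying on environment variables:

from langchain.tools.azure_cognitive_services import AzureCogsFormRecognizerTool

form_tool = AzureCogsFormRecognizerTool(
    azure_cogs_key="<your-cognitive-services-key>",
    azure_cogs_endpoint="<your-cognitive-services-endpoint>",
)

# Input is a URL (or local path) to a document; the output lists the extracted
# content, tables, and key-value pairs as plain text.
print(form_tool.run("https://example.com/sample-invoice.pdf"))  # hypothetical URL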
c3014c1a4b2b-0
Source code for langchain.tools.azure_cognitive_services.speech2text from __future__ import annotations import logging import time from typing import Any, Dict, Optional from pydantic import root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.azure_cognitive_services.utils import ( detect_file_src_type, download_audio_from_url, ) from langchain.tools.base import BaseTool from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class AzureCogsSpeech2TextTool(BaseTool): """Tool that queries the Azure Cognitive Services Speech2Text API. In order to set this up, follow instructions at: https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/get-started-speech-to-text?pivots=programming-language-python """ azure_cogs_key: str = "" #: :meta private: azure_cogs_region: str = "" #: :meta private: speech_language: str = "en-US" #: :meta private: speech_config: Any #: :meta private: name = "Azure Cognitive Services Speech2Text" description = ( "A wrapper around Azure Cognitive Services Speech2Text. " "Useful for when you need to transcribe audio to text. " "Input should be a url to an audio file." ) @root_validator(pre=True) def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and endpoint exists in environment.""" azure_cogs_key = get_from_dict_or_env( values, "azure_cogs_key", "AZURE_COGS_KEY" )
https://python.langchain.com/en/latest/_modules/langchain/tools/azure_cognitive_services/speech2text.html
c3014c1a4b2b-1
values, "azure_cogs_key", "AZURE_COGS_KEY" ) azure_cogs_region = get_from_dict_or_env( values, "azure_cogs_region", "AZURE_COGS_REGION" ) try: import azure.cognitiveservices.speech as speechsdk values["speech_config"] = speechsdk.SpeechConfig( subscription=azure_cogs_key, region=azure_cogs_region ) except ImportError: raise ImportError( "azure-cognitiveservices-speech is not installed. " "Run `pip install azure-cognitiveservices-speech` to install." ) return values def _continuous_recognize(self, speech_recognizer: Any) -> str: done = False text = "" def stop_cb(evt: Any) -> None: """callback that stop continuous recognition""" speech_recognizer.stop_continuous_recognition_async() nonlocal done done = True def retrieve_cb(evt: Any) -> None: """callback that retrieves the intermediate recognition results""" nonlocal text text += evt.result.text # retrieve text on recognized events speech_recognizer.recognized.connect(retrieve_cb) # stop continuous recognition on either session stopped or canceled events speech_recognizer.session_stopped.connect(stop_cb) speech_recognizer.canceled.connect(stop_cb) # Start continuous speech recognition speech_recognizer.start_continuous_recognition_async() while not done: time.sleep(0.5) return text def _speech2text(self, audio_path: str, speech_language: str) -> str: try: import azure.cognitiveservices.speech as speechsdk
https://python.langchain.com/en/latest/_modules/langchain/tools/azure_cognitive_services/speech2text.html
c3014c1a4b2b-2
try: import azure.cognitiveservices.speech as speechsdk except ImportError: pass audio_src_type = detect_file_src_type(audio_path) if audio_src_type == "local": audio_config = speechsdk.AudioConfig(filename=audio_path) elif audio_src_type == "remote": tmp_audio_path = download_audio_from_url(audio_path) audio_config = speechsdk.AudioConfig(filename=tmp_audio_path) else: raise ValueError(f"Invalid audio path: {audio_path}") self.speech_config.speech_recognition_language = speech_language speech_recognizer = speechsdk.SpeechRecognizer(self.speech_config, audio_config) return self._continuous_recognize(speech_recognizer) def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" try: text = self._speech2text(query, self.speech_language) return text except Exception as e: raise RuntimeError(f"Error while running AzureCogsSpeech2TextTool: {e}") async def _arun( self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool asynchronously.""" raise NotImplementedError("AzureCogsSpeech2TextTool does not support async") By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
https://python.langchain.com/en/latest/_modules/langchain/tools/azure_cognitive_services/speech2text.html
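A minimal sketch for the Speech2Text tool, assuming `azure-cognitiveservices-speech` is installed; the key, region, and audio URL are placeholders:

import os

from langchain.tools.azure_cognitive_services import AzureCogsSpeech2TextTool

# This tool uses a region rather than an endpoint; both values are placeholders.
os.environ["AZURE_COGS_KEY"] = "<your-cognitive-services-key>"
os.environ["AZURE_COGS_REGION"] = "westus2"

speech_tool = AzureCogsSpeech2TextTool()

# Remote audio files are downloaded to a temporary file before transcription.
print(speech_tool.run("https://example.com/sample-speech.wav"))  # hypothetical URL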
e70c41afbe39-0
Source code for langchain.tools.azure_cognitive_services.text2speech from __future__ import annotations import logging import tempfile from typing import Any, Dict, Optional from pydantic import root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class AzureCogsText2SpeechTool(BaseTool): """Tool that queries the Azure Cognitive Services Text2Speech API. In order to set this up, follow instructions at: https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/get-started-text-to-speech?pivots=programming-language-python """ azure_cogs_key: str = "" #: :meta private: azure_cogs_region: str = "" #: :meta private: speech_language: str = "en-US" #: :meta private: speech_config: Any #: :meta private: name = "Azure Cognitive Services Text2Speech" description = ( "A wrapper around Azure Cognitive Services Text2Speech. " "Useful for when you need to convert text to speech. " ) @root_validator(pre=True) def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and endpoint exists in environment.""" azure_cogs_key = get_from_dict_or_env( values, "azure_cogs_key", "AZURE_COGS_KEY" ) azure_cogs_region = get_from_dict_or_env( values, "azure_cogs_region", "AZURE_COGS_REGION" ) try:
https://python.langchain.com/en/latest/_modules/langchain/tools/azure_cognitive_services/text2speech.html
e70c41afbe39-1
) try: import azure.cognitiveservices.speech as speechsdk values["speech_config"] = speechsdk.SpeechConfig( subscription=azure_cogs_key, region=azure_cogs_region ) except ImportError: raise ImportError( "azure-cognitiveservices-speech is not installed. " "Run `pip install azure-cognitiveservices-speech` to install." ) return values def _text2speech(self, text: str, speech_language: str) -> str: try: import azure.cognitiveservices.speech as speechsdk except ImportError: pass self.speech_config.speech_synthesis_language = speech_language speech_synthesizer = speechsdk.SpeechSynthesizer( speech_config=self.speech_config, audio_config=None ) result = speech_synthesizer.speak_text(text) if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted: stream = speechsdk.AudioDataStream(result) with tempfile.NamedTemporaryFile( mode="wb", suffix=".wav", delete=False ) as f: stream.save_to_wav_file(f.name) return f.name elif result.reason == speechsdk.ResultReason.Canceled: cancellation_details = result.cancellation_details logger.debug(f"Speech synthesis canceled: {cancellation_details.reason}") if cancellation_details.reason == speechsdk.CancellationReason.Error: raise RuntimeError( f"Speech synthesis error: {cancellation_details.error_details}" ) return "Speech synthesis canceled." else: return f"Speech synthesis failed: {result.reason}" def _run( self, query: str,
https://python.langchain.com/en/latest/_modules/langchain/tools/azure_cognitive_services/text2speech.html
e70c41afbe39-2
    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            speech_file = self._text2speech(query, self.speech_language)
            return speech_file
        except Exception as e:
            raise RuntimeError(f"Error while running AzureCogsText2SpeechTool: {e}")

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("AzureCogsText2SpeechTool does not support async")
https://python.langchain.com/en/latest/_modules/langchain/tools/azure_cognitive_services/text2speech.html
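A minimal sketch for the Text2Speech tool under the same assumptions (placeholder key and region, `azure-cognitiveservices-speech` installed):

import os

from langchain.tools.azure_cognitive_services import AzureCogsText2SpeechTool

os.environ["AZURE_COGS_KEY"] = "<your-cognitive-services-key>"  # placeholder
os.environ["AZURE_COGS_REGION"] = "westus2"                     # placeholder

tts_tool = AzureCogsText2SpeechTool()

# On success the tool returns the path of a temporary .wav file with the audio.
wav_path = tts_tool.run("Hello from LangChain")
print(wav_path)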
1b6b3d751132-0
Source code for langchain.tools.file_management.read from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class ReadFileInput(BaseModel): """Input for ReadFileTool.""" file_path: str = Field(..., description="name of file") [docs]class ReadFileTool(BaseFileToolMixin, BaseTool): name: str = "read_file" args_schema: Type[BaseModel] = ReadFileInput description: str = "Read file from disk" def _run( self, file_path: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: read_path = self.get_relative_path(file_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path) if not read_path.exists(): return f"Error: no such file or directory: {file_path}" try: with read_path.open("r", encoding="utf-8") as f: content = f.read() return content except Exception as e: return "Error: " + str(e) async def _arun( self, file_path: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: # TODO: Add aiofiles method raise NotImplementedError By Harrison Chase
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/read.html
1b6b3d751132-1
        # TODO: Add aiofiles method
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/read.html
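A short sketch of using the read tool directly; the sandbox directory and file name below are illustrative only:

from langchain.tools.file_management import ReadFileTool

# root_dir sandboxes the tool: file_path is resolved relative to this directory
# and access outside it is rejected by the path validation mixin.
read_tool = ReadFileTool(root_dir="./workspace")

print(read_tool.run("notes.txt"))  # returns the file contents, or an error string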
40d792e6a15d-0
Source code for langchain.tools.file_management.file_search import fnmatch import os from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class FileSearchInput(BaseModel): """Input for FileSearchTool.""" dir_path: str = Field( default=".", description="Subdirectory to search in.", ) pattern: str = Field( ..., description="Unix shell regex, where * matches everything.", ) [docs]class FileSearchTool(BaseFileToolMixin, BaseTool): name: str = "file_search" args_schema: Type[BaseModel] = FileSearchInput description: str = ( "Recursively search for files in a subdirectory that match the regex pattern" ) def _run( self, pattern: str, dir_path: str = ".", run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: dir_path_ = self.get_relative_path(dir_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value=dir_path) matches = [] try: for root, _, filenames in os.walk(dir_path_): for filename in fnmatch.filter(filenames, pattern): absolute_path = os.path.join(root, filename) relative_path = os.path.relpath(absolute_path, dir_path_) matches.append(relative_path) if matches:
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/file_search.html
40d792e6a15d-1
                    matches.append(relative_path)
            if matches:
                return "\n".join(matches)
            else:
                return f"No files found for pattern {pattern} in directory {dir_path}"
        except Exception as e:
            return "Error: " + str(e)

    async def _arun(
        self,
        dir_path: str,
        pattern: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        # TODO: Add aiofiles method
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/file_search.html
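A sketch of the search tool; the directory name and glob pattern are illustrative. Multi-argument tools take a dict that matches their args_schema:

from langchain.tools.file_management import FileSearchTool

search_tool = FileSearchTool(root_dir="./workspace")  # illustrative sandbox dir

print(search_tool.run({"dir_path": ".", "pattern": "*.md"}))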
2701baa9e85a-0
Source code for langchain.tools.file_management.list_dir import os from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class DirectoryListingInput(BaseModel): """Input for ListDirectoryTool.""" dir_path: str = Field(default=".", description="Subdirectory to list.") [docs]class ListDirectoryTool(BaseFileToolMixin, BaseTool): name: str = "list_directory" args_schema: Type[BaseModel] = DirectoryListingInput description: str = "List files and directories in a specified folder" def _run( self, dir_path: str = ".", run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: dir_path_ = self.get_relative_path(dir_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value=dir_path) try: entries = os.listdir(dir_path_) if entries: return "\n".join(entries) else: return f"No files found in directory {dir_path}" except Exception as e: return "Error: " + str(e) async def _arun( self, dir_path: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: # TODO: Add aiofiles method raise NotImplementedError By Harrison Chase © Copyright 2023, Harrison Chase.
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/list_dir.html
2701baa9e85a-1
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/list_dir.html
91ec62a5f730-0
Source code for langchain.tools.file_management.write from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class WriteFileInput(BaseModel): """Input for WriteFileTool.""" file_path: str = Field(..., description="name of file") text: str = Field(..., description="text to write to file") append: bool = Field( default=False, description="Whether to append to an existing file." ) [docs]class WriteFileTool(BaseFileToolMixin, BaseTool): name: str = "write_file" args_schema: Type[BaseModel] = WriteFileInput description: str = "Write file to disk" def _run( self, file_path: str, text: str, append: bool = False, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: write_path = self.get_relative_path(file_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path) try: write_path.parent.mkdir(exist_ok=True, parents=False) mode = "a" if append else "w" with write_path.open(mode, encoding="utf-8") as f: f.write(text) return f"File written successfully to {file_path}." except Exception as e: return "Error: " + str(e)
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/write.html
91ec62a5f730-1
        except Exception as e:
            return "Error: " + str(e)

    async def _arun(
        self,
        file_path: str,
        text: str,
        append: bool = False,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        # TODO: Add aiofiles method
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/write.html
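A sketch of the write tool, again with illustrative directory and file names:

from langchain.tools.file_management import WriteFileTool

write_tool = WriteFileTool(root_dir="./workspace")  # illustrative sandbox dir

# The first call creates (or overwrites) the file; the second appends to it.
print(write_tool.run({"file_path": "notes.txt", "text": "first line\n"}))
print(write_tool.run({"file_path": "notes.txt", "text": "second line\n", "append": True}))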
0fd49865cac1-0
Source code for langchain.tools.file_management.copy import shutil from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class FileCopyInput(BaseModel): """Input for CopyFileTool.""" source_path: str = Field(..., description="Path of the file to copy") destination_path: str = Field(..., description="Path to save the copied file") [docs]class CopyFileTool(BaseFileToolMixin, BaseTool): name: str = "copy_file" args_schema: Type[BaseModel] = FileCopyInput description: str = "Create a copy of a file in a specified location" def _run( self, source_path: str, destination_path: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: source_path_ = self.get_relative_path(source_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format( arg_name="source_path", value=source_path ) try: destination_path_ = self.get_relative_path(destination_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format( arg_name="destination_path", value=destination_path ) try: shutil.copy2(source_path_, destination_path_, follow_symlinks=False) return f"File copied successfully from {source_path} to {destination_path}." except Exception as e:
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/copy.html
0fd49865cac1-1
        except Exception as e:
            return "Error: " + str(e)

    async def _arun(
        self,
        source_path: str,
        destination_path: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        # TODO: Add aiofiles method
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/copy.html
f20d76a0b713-0
Source code for langchain.tools.file_management.move import shutil from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class FileMoveInput(BaseModel): """Input for MoveFileTool.""" source_path: str = Field(..., description="Path of the file to move") destination_path: str = Field(..., description="New path for the moved file") [docs]class MoveFileTool(BaseFileToolMixin, BaseTool): name: str = "move_file" args_schema: Type[BaseModel] = FileMoveInput description: str = "Move or rename a file from one location to another" def _run( self, source_path: str, destination_path: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: source_path_ = self.get_relative_path(source_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format( arg_name="source_path", value=source_path ) try: destination_path_ = self.get_relative_path(destination_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format( arg_name="destination_path_", value=destination_path_ ) if not source_path_.exists(): return f"Error: no such file or directory {source_path}" try: # shutil.move expects str args in 3.8 shutil.move(str(source_path_), destination_path_)
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/move.html
f20d76a0b713-1
            shutil.move(str(source_path_), destination_path_)
            return f"File moved successfully from {source_path} to {destination_path}."
        except Exception as e:
            return "Error: " + str(e)

    async def _arun(
        self,
        source_path: str,
        destination_path: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        # TODO: Add aiofiles method
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/move.html
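A sketch of the move tool with illustrative paths; both paths are validated against root_dir before shutil.move is called:

from langchain.tools.file_management import MoveFileTool

move_tool = MoveFileTool(root_dir="./workspace")  # illustrative sandbox dir

print(move_tool.run({"source_path": "notes.txt", "destination_path": "notes_old.txt"}))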
d5e6dab9034b-0
Source code for langchain.tools.file_management.delete import os from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.base import BaseTool from langchain.tools.file_management.utils import ( INVALID_PATH_TEMPLATE, BaseFileToolMixin, FileValidationError, ) class FileDeleteInput(BaseModel): """Input for DeleteFileTool.""" file_path: str = Field(..., description="Path of the file to delete") [docs]class DeleteFileTool(BaseFileToolMixin, BaseTool): name: str = "file_delete" args_schema: Type[BaseModel] = FileDeleteInput description: str = "Delete a file" def _run( self, file_path: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: try: file_path_ = self.get_relative_path(file_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path) if not file_path_.exists(): return f"Error: no such file or directory: {file_path}" try: os.remove(file_path_) return f"File deleted successfully: {file_path}." except Exception as e: return "Error: " + str(e) async def _arun( self, file_path: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: # TODO: Add aiofiles method raise NotImplementedError By Harrison Chase © Copyright 2023, Harrison Chase.
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/delete.html
d5e6dab9034b-1
        raise NotImplementedError
https://python.langchain.com/en/latest/_modules/langchain/tools/file_management/delete.html
c1dd2e4cc55c-0
Source code for langchain.tools.playwright.navigate_back from __future__ import annotations from typing import Optional, Type from pydantic import BaseModel from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import ( aget_current_page, get_current_page, ) [docs]class NavigateBackTool(BaseBrowserTool): """Navigate back to the previous page in the browser history.""" name: str = "previous_webpage" description: str = "Navigate back to the previous page in the browser history" args_schema: Type[BaseModel] = BaseModel def _run(self, run_manager: Optional[CallbackManagerForToolRun] = None) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) response = page.go_back() if response: return ( f"Navigated back to the previous page with URL '{response.url}'." f" Status code {response.status}" ) else: return "Unable to navigate back; no previous page in the history" async def _arun( self, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) response = await page.go_back() if response: return (
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/navigate_back.html
c1dd2e4cc55c-1
        response = await page.go_back()
        if response:
            return (
                f"Navigated back to the previous page with URL '{response.url}'."
                f" Status code {response.status}"
            )
        else:
            return "Unable to navigate back; no previous page in the history"
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/navigate_back.html
09bc1050c547-0
Source code for langchain.tools.playwright.navigate from __future__ import annotations from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import ( aget_current_page, get_current_page, ) class NavigateToolInput(BaseModel): """Input for NavigateToolInput.""" url: str = Field(..., description="url to navigate to") [docs]class NavigateTool(BaseBrowserTool): name: str = "navigate_browser" description: str = "Navigate a browser to the specified URL" args_schema: Type[BaseModel] = NavigateToolInput def _run( self, url: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) response = page.goto(url) status = response.status if response else "unknown" return f"Navigating to {url} returned status code {status}" async def _arun( self, url: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) response = await page.goto(url)
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/navigate.html
09bc1050c547-1
        response = await page.goto(url)
        status = response.status if response else "unknown"
        return f"Navigating to {url} returned status code {status}"
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/navigate.html
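A minimal sketch of driving this tool with a synchronous browser, assuming the tool classes and the create_sync_playwright_browser helper are importable as shown and that Playwright is installed (`pip install playwright` followed by `playwright install`):

from langchain.tools.playwright import NavigateTool
from langchain.tools.playwright.utils import create_sync_playwright_browser

sync_browser = create_sync_playwright_browser()

navigate_tool = NavigateTool(sync_browser=sync_browser)
print(navigate_tool.run("https://python.langchain.com/en/latest/"))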
437ff1f04829-0
Source code for langchain.tools.playwright.extract_hyperlinks from __future__ import annotations import json from typing import TYPE_CHECKING, Any, Optional, Type from pydantic import BaseModel, Field, root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import aget_current_page, get_current_page if TYPE_CHECKING: pass class ExtractHyperlinksToolInput(BaseModel): """Input for ExtractHyperlinksTool.""" absolute_urls: bool = Field( default=False, description="Return absolute URLs instead of relative URLs", ) [docs]class ExtractHyperlinksTool(BaseBrowserTool): """Extract all hyperlinks on the page.""" name: str = "extract_hyperlinks" description: str = "Extract all hyperlinks on the current webpage" args_schema: Type[BaseModel] = ExtractHyperlinksToolInput @root_validator def check_bs_import(cls, values: dict) -> dict: """Check that the arguments are valid.""" try: from bs4 import BeautifulSoup # noqa: F401 except ImportError: raise ValueError( "The 'beautifulsoup4' package is required to use this tool." " Please install it with 'pip install beautifulsoup4'." ) return values [docs] @staticmethod def scrape_page(page: Any, html_content: str, absolute_urls: bool) -> str: from urllib.parse import urljoin from bs4 import BeautifulSoup # Parse the HTML content with BeautifulSoup soup = BeautifulSoup(html_content, "lxml") # Find all the anchor elements and extract their href attributes
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/extract_hyperlinks.html
437ff1f04829-1
# Find all the anchor elements and extract their href attributes anchors = soup.find_all("a") if absolute_urls: base_url = page.url links = [urljoin(base_url, anchor.get("href", "")) for anchor in anchors] else: links = [anchor.get("href", "") for anchor in anchors] # Return the list of links as a JSON string return json.dumps(links) def _run( self, absolute_urls: bool = False, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) html_content = page.content() return self.scrape_page(page, html_content, absolute_urls) async def _arun( self, absolute_urls: bool = False, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool asynchronously.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) html_content = await page.content() return self.scrape_page(page, html_content, absolute_urls) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/extract_hyperlinks.html
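A sketch of extracting links after navigating, under the same assumptions as above plus `beautifulsoup4` and `lxml` installed (the page is parsed with the "lxml" parser):

from langchain.tools.playwright import ExtractHyperlinksTool, NavigateTool
from langchain.tools.playwright.utils import create_sync_playwright_browser

sync_browser = create_sync_playwright_browser()
NavigateTool(sync_browser=sync_browser).run("https://python.langchain.com/en/latest/")

links_tool = ExtractHyperlinksTool(sync_browser=sync_browser)

# Returns a JSON string; absolute_urls=True joins relative hrefs with the page URL.
print(links_tool.run({"absolute_urls": True}))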
8f10b404113c-0
Source code for langchain.tools.playwright.current_page from __future__ import annotations from typing import Optional, Type from pydantic import BaseModel from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import aget_current_page, get_current_page [docs]class CurrentWebPageTool(BaseBrowserTool): name: str = "current_webpage" description: str = "Returns the URL of the current page" args_schema: Type[BaseModel] = BaseModel def _run( self, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) return str(page.url) async def _arun( self, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) return str(page.url) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/current_page.html
3f54552d22bc-0
Source code for langchain.tools.playwright.click from __future__ import annotations from typing import Optional, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import ( aget_current_page, get_current_page, ) class ClickToolInput(BaseModel): """Input for ClickTool.""" selector: str = Field(..., description="CSS selector for the element to click") [docs]class ClickTool(BaseBrowserTool): name: str = "click_element" description: str = "Click on an element with the given CSS selector" args_schema: Type[BaseModel] = ClickToolInput visible_only: bool = True """Whether to consider only visible elements.""" playwright_strict: bool = False """Whether to employ Playwright's strict mode when clicking on elements.""" playwright_timeout: float = 1_000 """Timeout (in ms) for Playwright to wait for element to be ready.""" def _selector_effective(self, selector: str) -> str: if not self.visible_only: return selector return f"{selector} >> visible=1" def _run( self, selector: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) # Navigate to the desired webpage before using this tool
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/click.html
3f54552d22bc-1
# Navigate to the desired webpage before using this tool selector_effective = self._selector_effective(selector=selector) from playwright.sync_api import TimeoutError as PlaywrightTimeoutError try: page.click( selector_effective, strict=self.playwright_strict, timeout=self.playwright_timeout, ) except PlaywrightTimeoutError: return f"Unable to click on element '{selector}'" return f"Clicked element '{selector}'" async def _arun( self, selector: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) # Navigate to the desired webpage before using this tool selector_effective = self._selector_effective(selector=selector) from playwright.async_api import TimeoutError as PlaywrightTimeoutError try: await page.click( selector_effective, strict=self.playwright_strict, timeout=self.playwright_timeout, ) except PlaywrightTimeoutError: return f"Unable to click on element '{selector}'" return f"Clicked element '{selector}'" By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/click.html
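A sketch of the click tool; the target page and CSS selector are illustrative, and a selector that never becomes clickable simply yields the "Unable to click" message after the timeout:

from langchain.tools.playwright import ClickTool, NavigateTool
from langchain.tools.playwright.utils import create_sync_playwright_browser

sync_browser = create_sync_playwright_browser()
NavigateTool(sync_browser=sync_browser).run("https://example.com")  # hypothetical page

click_tool = ClickTool(sync_browser=sync_browser)

# "a" is an illustrative selector; only visible elements are considered by default.
print(click_tool.run({"selector": "a"}))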
a989598aa7a6-0
Source code for langchain.tools.playwright.extract_text from __future__ import annotations from typing import Optional, Type from pydantic import BaseModel, root_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import aget_current_page, get_current_page [docs]class ExtractTextTool(BaseBrowserTool): name: str = "extract_text" description: str = "Extract all the text on the current webpage" args_schema: Type[BaseModel] = BaseModel @root_validator def check_acheck_bs_importrgs(cls, values: dict) -> dict: """Check that the arguments are valid.""" try: from bs4 import BeautifulSoup # noqa: F401 except ImportError: raise ValueError( "The 'beautifulsoup4' package is required to use this tool." " Please install it with 'pip install beautifulsoup4'." ) return values def _run(self, run_manager: Optional[CallbackManagerForToolRun] = None) -> str: """Use the tool.""" # Use Beautiful Soup since it's faster than looping through the elements from bs4 import BeautifulSoup if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) html_content = page.content() # Parse the HTML content with BeautifulSoup soup = BeautifulSoup(html_content, "lxml") return " ".join(text for text in soup.stripped_strings) async def _arun( self, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/extract_text.html
a989598aa7a6-1
self, run_manager: Optional[AsyncCallbackManagerForToolRun] = None ) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}") # Use Beautiful Soup since it's faster than looping through the elements from bs4 import BeautifulSoup page = await aget_current_page(self.async_browser) html_content = await page.content() # Parse the HTML content with BeautifulSoup soup = BeautifulSoup(html_content, "lxml") return " ".join(text for text in soup.stripped_strings) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/extract_text.html
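A sketch of text extraction under the same Playwright and BeautifulSoup assumptions; the tool takes no arguments, so it can be invoked with an empty dict:

from langchain.tools.playwright import ExtractTextTool, NavigateTool
from langchain.tools.playwright.utils import create_sync_playwright_browser

sync_browser = create_sync_playwright_browser()
NavigateTool(sync_browser=sync_browser).run("https://python.langchain.com/en/latest/")

text_tool = ExtractTextTool(sync_browser=sync_browser)
print(text_tool.run({}))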
c947f4210ceb-0
Source code for langchain.tools.playwright.get_elements from __future__ import annotations import json from typing import TYPE_CHECKING, List, Optional, Sequence, Type from pydantic import BaseModel, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools.playwright.base import BaseBrowserTool from langchain.tools.playwright.utils import aget_current_page, get_current_page if TYPE_CHECKING: from playwright.async_api import Page as AsyncPage from playwright.sync_api import Page as SyncPage class GetElementsToolInput(BaseModel): """Input for GetElementsTool.""" selector: str = Field( ..., description="CSS selector, such as '*', 'div', 'p', 'a', #id, .classname", ) attributes: List[str] = Field( default_factory=lambda: ["innerText"], description="Set of attributes to retrieve for each element", ) async def _aget_elements( page: AsyncPage, selector: str, attributes: Sequence[str] ) -> List[dict]: """Get elements matching the given CSS selector.""" elements = await page.query_selector_all(selector) results = [] for element in elements: result = {} for attribute in attributes: if attribute == "innerText": val: Optional[str] = await element.inner_text() else: val = await element.get_attribute(attribute) if val is not None and val.strip() != "": result[attribute] = val if result: results.append(result) return results def _get_elements( page: SyncPage, selector: str, attributes: Sequence[str] ) -> List[dict]:
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/get_elements.html
c947f4210ceb-1
) -> List[dict]: """Get elements matching the given CSS selector.""" elements = page.query_selector_all(selector) results = [] for element in elements: result = {} for attribute in attributes: if attribute == "innerText": val: Optional[str] = element.inner_text() else: val = element.get_attribute(attribute) if val is not None and val.strip() != "": result[attribute] = val if result: results.append(result) return results [docs]class GetElementsTool(BaseBrowserTool): name: str = "get_elements" description: str = ( "Retrieve elements in the current web page matching the given CSS selector" ) args_schema: Type[BaseModel] = GetElementsToolInput def _run( self, selector: str, attributes: Sequence[str] = ["innerText"], run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f"Synchronous browser not provided to {self.name}") page = get_current_page(self.sync_browser) # Navigate to the desired webpage before using this tool results = _get_elements(page, selector, attributes) return json.dumps(results, ensure_ascii=False) async def _arun( self, selector: str, attributes: Sequence[str] = ["innerText"], run_manager: Optional[AsyncCallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" if self.async_browser is None: raise ValueError(f"Asynchronous browser not provided to {self.name}")
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/get_elements.html
c947f4210ceb-2
raise ValueError(f"Asynchronous browser not provided to {self.name}") page = await aget_current_page(self.async_browser) # Navigate to the desired webpage before using this tool results = await _aget_elements(page, selector, attributes) return json.dumps(results, ensure_ascii=False) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on May 28, 2023.
https://python.langchain.com/en/latest/_modules/langchain/tools/playwright/get_elements.html
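A sketch of pulling elements from the current page under the same browser assumptions; the selector and attribute names are illustrative:

from langchain.tools.playwright import GetElementsTool, NavigateTool
from langchain.tools.playwright.utils import create_sync_playwright_browser

sync_browser = create_sync_playwright_browser()
NavigateTool(sync_browser=sync_browser).run("https://python.langchain.com/en/latest/")

elements_tool = GetElementsTool(sync_browser=sync_browser)

# Returns a JSON string with one object per matching element.
print(elements_tool.run({"selector": "a", "attributes": ["innerText", "href"]}))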
3a584bd2f335-0
Source code for langchain.tools.youtube.search """ Adapted from https://github.com/venuv/langchain_yt_tools CustomYTSearchTool searches YouTube videos related to a person and returns a specified number of video URLs. Input to this tool should be a comma separated list, - the first part contains a person name - and the second(optional) a number that is the maximum number of video results to return """ import json from typing import Optional from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, ) from langchain.tools import BaseTool [docs]class YouTubeSearchTool(BaseTool): name = "YouTubeSearch" description = ( "search for youtube videos associated with a person. " "the input to this tool should be a comma separated list, " "the first part contains a person name and the second a " "number that is the maximum number of video results " "to return aka num_results. the second part is optional" ) def _search(self, person: str, num_results: int) -> str: from youtube_search import YoutubeSearch results = YoutubeSearch(person, num_results).to_json() data = json.loads(results) url_suffix_list = [video["url_suffix"] for video in data["videos"]] return str(url_suffix_list) def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> str: """Use the tool.""" values = query.split(",") person = values[0] if len(values) > 1: num_results = int(values[1]) else:
https://python.langchain.com/en/latest/_modules/langchain/tools/youtube/search.html
3a584bd2f335-1
            num_results = int(values[1])
        else:
            num_results = 2
        return self._search(person, num_results)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("YouTubeSearchTool does not yet support async")
https://python.langchain.com/en/latest/_modules/langchain/tools/youtube/search.html
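A short sketch of the YouTube search tool, assuming `youtube_search` is installed; the query string is illustrative:

from langchain.tools import YouTubeSearchTool

yt_tool = YouTubeSearchTool()

# Input format: "<person or topic>,<max results>"; the count is optional (default 2).
print(yt_tool.run("lex fridman,3"))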
11bbba842a4f-0
Source code for langchain.tools.openapi.utils.api_models """Pydantic models for parsing an OpenAPI spec.""" import logging from enum import Enum from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union from openapi_schema_pydantic import MediaType, Parameter, Reference, RequestBody, Schema from pydantic import BaseModel, Field from langchain.tools.openapi.utils.openapi_utils import HTTPVerb, OpenAPISpec logger = logging.getLogger(__name__) PRIMITIVE_TYPES = { "integer": int, "number": float, "string": str, "boolean": bool, "array": List, "object": Dict, "null": None, } # See https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#parameterIn # for more info. class APIPropertyLocation(Enum): """The location of the property.""" QUERY = "query" PATH = "path" HEADER = "header" COOKIE = "cookie" # Not yet supported @classmethod def from_str(cls, location: str) -> "APIPropertyLocation": """Parse an APIPropertyLocation.""" try: return cls(location) except ValueError: raise ValueError( f"Invalid APIPropertyLocation. Valid values are {cls.__members__}" ) _SUPPORTED_MEDIA_TYPES = ("application/json",) SUPPORTED_LOCATIONS = { APIPropertyLocation.QUERY, APIPropertyLocation.PATH, } INVALID_LOCATION_TEMPL = ( 'Unsupported APIPropertyLocation "{location}"' " for parameter {name}. " + f"Valid values are {[loc.value for loc in SUPPORTED_LOCATIONS]}" )
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/api_models.html
11bbba842a4f-1
+ f"Valid values are {[loc.value for loc in SUPPORTED_LOCATIONS]}" ) SCHEMA_TYPE = Union[str, Type, tuple, None, Enum] class APIPropertyBase(BaseModel): """Base model for an API property.""" # The name of the parameter is required and is case sensitive. # If "in" is "path", the "name" field must correspond to a template expression # within the path field in the Paths Object. # If "in" is "header" and the "name" field is "Accept", "Content-Type", # or "Authorization", the parameter definition is ignored. # For all other cases, the "name" corresponds to the parameter # name used by the "in" property. name: str = Field(alias="name") """The name of the property.""" required: bool = Field(alias="required") """Whether the property is required.""" type: SCHEMA_TYPE = Field(alias="type") """The type of the property. Either a primitive type, a component/parameter type, or an array or 'object' (dict) of the above.""" default: Optional[Any] = Field(alias="default", default=None) """The default value of the property.""" description: Optional[str] = Field(alias="description", default=None) """The description of the property.""" class APIProperty(APIPropertyBase): """A model for a property in the query, path, header, or cookie params.""" location: APIPropertyLocation = Field(alias="location") """The path/how it's being passed to the endpoint.""" @staticmethod def _cast_schema_list_type(schema: Schema) -> Optional[Union[str, Tuple[str, ...]]]:
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/api_models.html
11bbba842a4f-2
type_ = schema.type if not isinstance(type_, list): return type_ else: return tuple(type_) @staticmethod def _get_schema_type_for_enum(parameter: Parameter, schema: Schema) -> Enum: """Get the schema type when the parameter is an enum.""" param_name = f"{parameter.name}Enum" return Enum(param_name, {str(v): v for v in schema.enum}) @staticmethod def _get_schema_type_for_array( schema: Schema, ) -> Optional[Union[str, Tuple[str, ...]]]: items = schema.items if isinstance(items, Schema): schema_type = APIProperty._cast_schema_list_type(items) elif isinstance(items, Reference): ref_name = items.ref.split("/")[-1] schema_type = ref_name # TODO: Add ref definitions to make his valid else: raise ValueError(f"Unsupported array items: {items}") if isinstance(schema_type, str): # TODO: recurse schema_type = (schema_type,) return schema_type @staticmethod def _get_schema_type(parameter: Parameter, schema: Optional[Schema]) -> SCHEMA_TYPE: if schema is None: return None schema_type: SCHEMA_TYPE = APIProperty._cast_schema_list_type(schema) if schema_type == "array": schema_type = APIProperty._get_schema_type_for_array(schema) elif schema_type == "object": # TODO: Resolve array and object types to components. raise NotImplementedError("Objects not yet supported") elif schema_type in PRIMITIVE_TYPES: if schema.enum: schema_type = APIProperty._get_schema_type_for_enum(parameter, schema) else:
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/api_models.html
11bbba842a4f-3
schema_type = APIProperty._get_schema_type_for_enum(parameter, schema) else: # Directly use the primitive type pass else: raise NotImplementedError(f"Unsupported type: {schema_type}") return schema_type @staticmethod def _validate_location(location: APIPropertyLocation, name: str) -> None: if location not in SUPPORTED_LOCATIONS: raise NotImplementedError( INVALID_LOCATION_TEMPL.format(location=location, name=name) ) @staticmethod def _validate_content(content: Optional[Dict[str, MediaType]]) -> None: if content: raise ValueError( "API Properties with media content not supported. " "Media content only supported within APIRequestBodyProperty's" ) @staticmethod def _get_schema(parameter: Parameter, spec: OpenAPISpec) -> Optional[Schema]: schema = parameter.param_schema if isinstance(schema, Reference): schema = spec.get_referenced_schema(schema) elif schema is None: return None elif not isinstance(schema, Schema): raise ValueError(f"Error dereferencing schema: {schema}") return schema @staticmethod def is_supported_location(location: str) -> bool: """Return whether the provided location is supported.""" try: return APIPropertyLocation.from_str(location) in SUPPORTED_LOCATIONS except ValueError: return False @classmethod def from_parameter(cls, parameter: Parameter, spec: OpenAPISpec) -> "APIProperty": """Instantiate from an OpenAPI Parameter.""" location = APIPropertyLocation.from_str(parameter.param_in) cls._validate_location( location, parameter.name, ) cls._validate_content(parameter.content)
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/api_models.html
11bbba842a4f-4
location, parameter.name, ) cls._validate_content(parameter.content) schema = cls._get_schema(parameter, spec) schema_type = cls._get_schema_type(parameter, schema) default_val = schema.default if schema is not None else None return cls( name=parameter.name, location=location, default=default_val, description=parameter.description, required=parameter.required, type=schema_type, ) class APIRequestBodyProperty(APIPropertyBase): """A model for a request body property.""" properties: List["APIRequestBodyProperty"] = Field(alias="properties") """The sub-properties of the property.""" # This is useful for handling nested property cycles. # We can define separate types in that case. references_used: List[str] = Field(alias="references_used") """The references used by the property.""" @classmethod def _process_object_schema( cls, schema: Schema, spec: OpenAPISpec, references_used: List[str] ) -> Tuple[Union[str, List[str], None], List["APIRequestBodyProperty"]]: properties = [] required_props = schema.required or [] if schema.properties is None: raise ValueError( f"No properties found when processing object schema: {schema}" ) for prop_name, prop_schema in schema.properties.items(): if isinstance(prop_schema, Reference): ref_name = prop_schema.ref.split("/")[-1] if ref_name not in references_used: references_used.append(ref_name) prop_schema = spec.get_referenced_schema(prop_schema) else: continue properties.append( cls.from_schema( schema=prop_schema, name=prop_name,
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/api_models.html
11bbba842a4f-5
cls.from_schema( schema=prop_schema, name=prop_name, required=prop_name in required_props, spec=spec, references_used=references_used, ) ) return schema.type, properties @classmethod def _process_array_schema( cls, schema: Schema, name: str, spec: OpenAPISpec, references_used: List[str] ) -> str: items = schema.items if items is not None: if isinstance(items, Reference): ref_name = items.ref.split("/")[-1] if ref_name not in references_used: references_used.append(ref_name) items = spec.get_referenced_schema(items) else: pass return f"Array<{ref_name}>" else: pass if isinstance(items, Schema): array_type = cls.from_schema( schema=items, name=f"{name}Item", required=True, # TODO: Add required spec=spec, references_used=references_used, ) return f"Array<{array_type.type}>" return "array" @classmethod def from_schema( cls, schema: Schema, name: str, required: bool, spec: OpenAPISpec, references_used: Optional[List[str]] = None, ) -> "APIRequestBodyProperty": """Recursively populate from an OpenAPI Schema.""" if references_used is None: references_used = [] schema_type = schema.type properties: List[APIRequestBodyProperty] = [] if schema_type == "object" and schema.properties: schema_type, properties = cls._process_object_schema(
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/api_models.html
11bbba842a4f-6
schema_type, properties = cls._process_object_schema( schema, spec, references_used ) elif schema_type == "array": schema_type = cls._process_array_schema(schema, name, spec, references_used) elif schema_type in PRIMITIVE_TYPES: # Use the primitive type directly pass elif schema_type is None: # No typing specified/parsed. WIll map to 'any' pass else: raise ValueError(f"Unsupported type: {schema_type}") return cls( name=name, required=required, type=schema_type, default=schema.default, description=schema.description, properties=properties, references_used=references_used, ) class APIRequestBody(BaseModel): """A model for a request body.""" description: Optional[str] = Field(alias="description") """The description of the request body.""" properties: List[APIRequestBodyProperty] = Field(alias="properties") # E.g., application/json - we only support JSON at the moment. media_type: str = Field(alias="media_type") """The media type of the request body.""" @classmethod def _process_supported_media_type( cls, media_type_obj: MediaType, spec: OpenAPISpec, ) -> List[APIRequestBodyProperty]: """Process the media type of the request body.""" references_used = [] schema = media_type_obj.media_type_schema if isinstance(schema, Reference): references_used.append(schema.ref.split("/")[-1]) schema = spec.get_referenced_schema(schema) if schema is None: raise ValueError( f"Could not resolve schema for media type: {media_type_obj}"
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/api_models.html
11bbba842a4f-7
f"Could not resolve schema for media type: {media_type_obj}" ) api_request_body_properties = [] required_properties = schema.required or [] if schema.type == "object" and schema.properties: for prop_name, prop_schema in schema.properties.items(): if isinstance(prop_schema, Reference): prop_schema = spec.get_referenced_schema(prop_schema) api_request_body_properties.append( APIRequestBodyProperty.from_schema( schema=prop_schema, name=prop_name, required=prop_name in required_properties, spec=spec, ) ) else: api_request_body_properties.append( APIRequestBodyProperty( name="body", required=True, type=schema.type, default=schema.default, description=schema.description, properties=[], references_used=references_used, ) ) return api_request_body_properties @classmethod def from_request_body( cls, request_body: RequestBody, spec: OpenAPISpec ) -> "APIRequestBody": """Instantiate from an OpenAPI RequestBody.""" properties = [] for media_type, media_type_obj in request_body.content.items(): if media_type not in _SUPPORTED_MEDIA_TYPES: continue api_request_body_properties = cls._process_supported_media_type( media_type_obj, spec, ) properties.extend(api_request_body_properties) return cls( description=request_body.description, properties=properties, media_type=media_type, ) [docs]class APIOperation(BaseModel): """A model for a single API operation.""" operation_id: str = Field(alias="operation_id")
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/api_models.html
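A minimal sketch of exercising APIRequestBody.from_request_body directly, assuming _SUPPORTED_MEDIA_TYPES accepts application/json (per the media_type comment above). The pet-style request body and the bare spec are hypothetical placeholders; the spec is only consulted when the body schema uses $ref references.

from langchain.tools.openapi.utils.api_models import APIRequestBody
from langchain.tools.openapi.utils.openapi_utils import OpenAPISpec
from openapi_schema_pydantic import RequestBody

# Hypothetical bare spec; only consulted when the body schema contains $refs.
spec = OpenAPISpec.from_spec_dict(
    {"openapi": "3.1.0", "info": {"title": "Demo", "version": "1.0.0"}, "paths": {}}
)

# Hypothetical request body with one JSON and one non-JSON media type.
request_body = RequestBody.parse_obj(
    {
        "description": "Create a pet",
        "content": {
            "application/json": {
                "schema": {
                    "type": "object",
                    "required": ["name"],
                    "properties": {
                        "name": {"type": "string"},
                        "age": {"type": "integer"},
                    },
                }
            },
            "text/plain": {"schema": {"type": "string"}},
        },
    }
)

body = APIRequestBody.from_request_body(request_body, spec)
for prop in body.properties:
    # Only the application/json properties survive; text/plain is skipped.
    print(prop.name, prop.type, prop.required)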
11bbba842a4f-8
operation_id: str = Field(alias="operation_id") """The unique identifier of the operation.""" description: Optional[str] = Field(alias="description") """The description of the operation.""" base_url: str = Field(alias="base_url") """The base URL of the operation.""" path: str = Field(alias="path") """The path of the operation.""" method: HTTPVerb = Field(alias="method") """The HTTP method of the operation.""" properties: Sequence[APIProperty] = Field(alias="properties") # TODO: Add parse in used components to be able to specify what type of # referenced object it is. # """The properties of the operation.""" # components: Dict[str, BaseModel] = Field(alias="components") request_body: Optional[APIRequestBody] = Field(alias="request_body") """The request body of the operation.""" @staticmethod def _get_properties_from_parameters( parameters: List[Parameter], spec: OpenAPISpec ) -> List[APIProperty]: """Get the properties of the operation.""" properties = [] for param in parameters: if APIProperty.is_supported_location(param.param_in): properties.append(APIProperty.from_parameter(param, spec)) elif param.required: raise ValueError( INVALID_LOCATION_TEMPL.format( location=param.param_in, name=param.name ) ) else: logger.warning( INVALID_LOCATION_TEMPL.format( location=param.param_in, name=param.name ) + " Ignoring optional parameter" ) pass return properties [docs] @classmethod def from_openapi_url( cls, spec_url: str,
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/api_models.html
11bbba842a4f-9
def from_openapi_url( cls, spec_url: str, path: str, method: str, ) -> "APIOperation": """Create an APIOperation from an OpenAPI URL.""" spec = OpenAPISpec.from_url(spec_url) return cls.from_openapi_spec(spec, path, method) [docs] @classmethod def from_openapi_spec( cls, spec: OpenAPISpec, path: str, method: str, ) -> "APIOperation": """Create an APIOperation from an OpenAPI spec.""" operation = spec.get_operation(path, method) parameters = spec.get_parameters_for_operation(operation) properties = cls._get_properties_from_parameters(parameters, spec) operation_id = OpenAPISpec.get_cleaned_operation_id(operation, path, method) request_body = spec.get_request_body_for_operation(operation) api_request_body = ( APIRequestBody.from_request_body(request_body, spec) if request_body is not None else None ) description = operation.description or operation.summary if not description and spec.paths is not None: description = spec.paths[path].description or spec.paths[path].summary return cls( operation_id=operation_id, description=description, base_url=spec.base_url, path=path, method=method, properties=properties, request_body=api_request_body, ) [docs] @staticmethod def ts_type_from_python(type_: SCHEMA_TYPE) -> str: if type_ is None: # TODO: Handle Nones better. These often result when # parsing specs that are < v3 return "any"
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/api_models.html
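A minimal sketch of the two constructors above; the spec URL, path, and method are hypothetical placeholders, not values from the original source.

from langchain.tools.openapi.utils.api_models import APIOperation
from langchain.tools.openapi.utils.openapi_utils import OpenAPISpec

SPEC_URL = "https://example.com/openapi.json"  # hypothetical spec location

# One-step construction: download the spec, then select one path/method pair.
operation = APIOperation.from_openapi_url(SPEC_URL, path="/pets/{petId}", method="get")

# Equivalent two-step construction when the same spec backs several operations.
spec = OpenAPISpec.from_url(SPEC_URL)
operation = APIOperation.from_openapi_spec(spec, path="/pets/{petId}", method="get")

print(operation.operation_id, operation.method, operation.base_url)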
11bbba842a4f-10
# parsing specs that are < v3 return "any" elif isinstance(type_, str): return { "str": "string", "integer": "number", "float": "number", "date-time": "string", }.get(type_, type_) elif isinstance(type_, tuple): return f"Array<{APIOperation.ts_type_from_python(type_[0])}>" elif isinstance(type_, type) and issubclass(type_, Enum): return " | ".join([f"'{e.value}'" for e in type_]) else: return str(type_) def _format_nested_properties( self, properties: List[APIRequestBodyProperty], indent: int = 2 ) -> str: """Format nested properties.""" formatted_props = [] for prop in properties: prop_name = prop.name prop_type = self.ts_type_from_python(prop.type) prop_required = "" if prop.required else "?" prop_desc = f"/* {prop.description} */" if prop.description else "" if prop.properties: nested_props = self._format_nested_properties( prop.properties, indent + 2 ) prop_type = f"{{\n{nested_props}\n{' ' * indent}}}" formatted_props.append( f"{prop_desc}\n{' ' * indent}{prop_name}{prop_required}: {prop_type}," ) return "\n".join(formatted_props) [docs] def to_typescript(self) -> str: """Get typescript string representation of the operation.""" operation_name = self.operation_id params = [] if self.request_body: formatted_request_body_props = self._format_nested_properties(
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/api_models.html
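The ts_type_from_python mapping can be checked in isolation; a small sketch based only on the lookup table shown above.

from langchain.tools.openapi.utils.api_models import APIOperation

# Known entries in the lookup table.
assert APIOperation.ts_type_from_python("str") == "string"
assert APIOperation.ts_type_from_python("integer") == "number"
assert APIOperation.ts_type_from_python("float") == "number"
assert APIOperation.ts_type_from_python("date-time") == "string"

# None (no type parsed) falls back to "any"; unknown strings pass through unchanged.
assert APIOperation.ts_type_from_python(None) == "any"
assert APIOperation.ts_type_from_python("boolean") == "boolean"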
11bbba842a4f-11
if self.request_body: formatted_request_body_props = self._format_nested_properties( self.request_body.properties ) params.append(formatted_request_body_props) for prop in self.properties: prop_name = prop.name prop_type = self.ts_type_from_python(prop.type) prop_required = "" if prop.required else "?" prop_desc = f"/* {prop.description} */" if prop.description else "" params.append(f"{prop_desc}\n\t\t{prop_name}{prop_required}: {prop_type},") formatted_params = "\n".join(params).strip() description_str = f"/* {self.description} */" if self.description else "" typescript_definition = f""" {description_str} type {operation_name} = (_: {{ {formatted_params} }}) => any; """ return typescript_definition.strip() @property def query_params(self) -> List[str]: return [ property.name for property in self.properties if property.location == APIPropertyLocation.QUERY ] @property def path_params(self) -> List[str]: return [ property.name for property in self.properties if property.location == APIPropertyLocation.PATH ] @property def body_params(self) -> List[str]: if self.request_body is None: return [] return [prop.name for prop in self.request_body.properties]
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/api_models.html
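Putting the pieces together: to_typescript renders the signature that gets embedded in prompts, and the query_params/path_params/body_params properties report where each argument is sent. A sketch reusing the hypothetical pet-store endpoint from the earlier example.

from langchain.tools.openapi.utils.api_models import APIOperation

# Hypothetical spec URL and endpoint, as in the earlier sketch.
operation = APIOperation.from_openapi_url(
    "https://example.com/openapi.json", path="/pets/{petId}", method="get"
)

# TypeScript-style signature, roughly "type <operationId> = (_: { ... }) => any;"
print(operation.to_typescript())

# Parameter routing: names only, grouped by location.
print("path:", operation.path_params)
print("query:", operation.query_params)
print("body:", operation.body_params)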
93568b333c3a-0
Source code for langchain.tools.openapi.utils.openapi_utils """Utility functions for parsing an OpenAPI spec.""" import copy import json import logging import re from enum import Enum from pathlib import Path from typing import Dict, List, Optional, Union import requests import yaml from openapi_schema_pydantic import ( Components, OpenAPI, Operation, Parameter, PathItem, Paths, Reference, RequestBody, Schema, ) from pydantic import ValidationError logger = logging.getLogger(__name__) class HTTPVerb(str, Enum): """HTTP verbs.""" GET = "get" PUT = "put" POST = "post" DELETE = "delete" OPTIONS = "options" HEAD = "head" PATCH = "patch" TRACE = "trace" @classmethod def from_str(cls, verb: str) -> "HTTPVerb": """Parse an HTTP verb.""" try: return cls(verb) except ValueError: raise ValueError(f"Invalid HTTP verb. Valid values are {cls.__members__}") [docs]class OpenAPISpec(OpenAPI): """OpenAPI Model that removes misformatted parts of the spec.""" @property def _paths_strict(self) -> Paths: if not self.paths: raise ValueError("No paths found in spec") return self.paths def _get_path_strict(self, path: str) -> PathItem: path_item = self._paths_strict.get(path) if not path_item: raise ValueError(f"No path found for {path}") return path_item @property def _components_strict(self) -> Components:
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/openapi_utils.html
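HTTPVerb.from_str is a thin wrapper over the enum constructor; a minimal sketch of its behaviour.

from langchain.tools.openapi.utils.openapi_utils import HTTPVerb

assert HTTPVerb.from_str("get") is HTTPVerb.GET
assert HTTPVerb.from_str("patch") is HTTPVerb.PATCH

try:
    HTTPVerb.from_str("fetch")  # not a valid HTTP verb
except ValueError as err:
    print(err)  # message lists the valid members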
93568b333c3a-1
return path_item @property def _components_strict(self) -> Components: """Get components or err.""" if self.components is None: raise ValueError("No components found in spec. ") return self.components @property def _parameters_strict(self) -> Dict[str, Union[Parameter, Reference]]: """Get parameters or err.""" parameters = self._components_strict.parameters if parameters is None: raise ValueError("No parameters found in spec. ") return parameters @property def _schemas_strict(self) -> Dict[str, Schema]: """Get the dictionary of schemas or err.""" schemas = self._components_strict.schemas if schemas is None: raise ValueError("No schemas found in spec. ") return schemas @property def _request_bodies_strict(self) -> Dict[str, Union[RequestBody, Reference]]: """Get the request body or err.""" request_bodies = self._components_strict.requestBodies if request_bodies is None: raise ValueError("No request body found in spec. ") return request_bodies def _get_referenced_parameter(self, ref: Reference) -> Union[Parameter, Reference]: """Get a parameter (or nested reference) or err.""" ref_name = ref.ref.split("/")[-1] parameters = self._parameters_strict if ref_name not in parameters: raise ValueError(f"No parameter found for {ref_name}") return parameters[ref_name] def _get_root_referenced_parameter(self, ref: Reference) -> Parameter: """Get the root reference or err.""" parameter = self._get_referenced_parameter(ref) while isinstance(parameter, Reference):
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/openapi_utils.html
93568b333c3a-2
parameter = self._get_referenced_parameter(ref) while isinstance(parameter, Reference): parameter = self._get_referenced_parameter(parameter) return parameter [docs] def get_referenced_schema(self, ref: Reference) -> Schema: """Get a schema (or nested reference) or err.""" ref_name = ref.ref.split("/")[-1] schemas = self._schemas_strict if ref_name not in schemas: raise ValueError(f"No schema found for {ref_name}") return schemas[ref_name] def _get_root_referenced_schema(self, ref: Reference) -> Schema: """Get the root reference or err.""" schema = self.get_referenced_schema(ref) while isinstance(schema, Reference): schema = self.get_referenced_schema(schema) return schema def _get_referenced_request_body( self, ref: Reference ) -> Optional[Union[Reference, RequestBody]]: """Get a request body (or nested reference) or err.""" ref_name = ref.ref.split("/")[-1] request_bodies = self._request_bodies_strict if ref_name not in request_bodies: raise ValueError(f"No request body found for {ref_name}") return request_bodies[ref_name] def _get_root_referenced_request_body( self, ref: Reference ) -> Optional[RequestBody]: """Get the root request Body or err.""" request_body = self._get_referenced_request_body(ref) while isinstance(request_body, Reference): request_body = self._get_referenced_request_body(request_body) return request_body @staticmethod def _alert_unsupported_spec(obj: dict) -> None: """Alert if the spec is not supported."""
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/openapi_utils.html
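get_referenced_schema resolves a $ref one level against components/schemas (the private _get_root_referenced_schema variant keeps following nested references). A sketch with a hypothetical "Pet" schema; an unknown name raises ValueError.

from langchain.tools.openapi.utils.openapi_utils import OpenAPISpec
from openapi_schema_pydantic import Reference

# Hypothetical spec with a single named schema under components.
spec = OpenAPISpec.from_spec_dict(
    {
        "openapi": "3.1.0",
        "info": {"title": "Demo", "version": "1.0.0"},
        "paths": {},
        "components": {
            "schemas": {
                "Pet": {
                    "type": "object",
                    "properties": {"name": {"type": "string"}},
                }
            }
        },
    }
)

pet = spec.get_referenced_schema(Reference.parse_obj({"$ref": "#/components/schemas/Pet"}))
print(pet.type, list(pet.properties))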
93568b333c3a-3
"""Alert if the spec is not supported.""" warning_message = ( " This may result in degraded performance." + " Convert your OpenAPI spec to 3.1.* spec" + " for better support." ) swagger_version = obj.get("swagger") openapi_version = obj.get("openapi") if isinstance(openapi_version, str): if openapi_version != "3.1.0": logger.warning( f"Attempting to load an OpenAPI {openapi_version}" f" spec. {warning_message}" ) else: pass elif isinstance(swagger_version, str): logger.warning( f"Attempting to load a Swagger {swagger_version}" f" spec. {warning_message}" ) else: raise ValueError( "Attempting to load an unsupported spec:" f"\n\n{obj}\n{warning_message}" ) [docs] @classmethod def parse_obj(cls, obj: dict) -> "OpenAPISpec": try: cls._alert_unsupported_spec(obj) return super().parse_obj(obj) except ValidationError as e: # We are handling possibly misconfigured specs and want to do a best-effort # job to get a reasonable interface out of it. new_obj = copy.deepcopy(obj) for error in e.errors(): keys = error["loc"] item = new_obj for key in keys[:-1]: item = item[key] item.pop(keys[-1], None) return cls.parse_obj(new_obj) [docs] @classmethod def from_spec_dict(cls, spec_dict: dict) -> "OpenAPISpec":
https://python.langchain.com/en/latest/_modules/langchain/tools/openapi/utils/openapi_utils.html
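parse_obj is deliberately lenient: pre-3.1 versions only produce a log warning, and any field that fails validation is popped before the parse is retried. A minimal sketch with a hypothetical legacy spec, assuming from_spec_dict (whose body continues below) simply hands the raw dict to parse_obj.

import logging

from langchain.tools.openapi.utils.openapi_utils import OpenAPISpec

logging.basicConfig(level=logging.WARNING)

legacy_spec = {
    "openapi": "3.0.3",  # pre-3.1 version: triggers the degraded-performance warning
    "info": {"title": "Legacy API", "version": "1.0.0"},
    "paths": {},
}

spec = OpenAPISpec.from_spec_dict(legacy_spec)
print(spec.openapi, spec.info.title)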