field verbose: bool [Optional]
    Whether to print out response text.

__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str
    Check Cache and run the LLM on the given prompt and input.

async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Run the LLM on the given prompt and input.

async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Take in a list of prompt values and return an LLMResult.

classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model
    Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = 'allow' was set since it adds all passed values.

copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model
    Duplicate a model, optionally choose which fields to include, exclude and change.
    Parameters
        include – fields to include in new model
        exclude – fields to exclude from new model, as with values this takes precedence over include
        update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data
        deep – set to True to make a deep copy of the model
    Returns
        new model instance

dict(**kwargs: Any) → Dict
    Return a dictionary of the LLM.

generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Run the LLM on the given prompt and input.

generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Take in a list of prompt values and return an LLMResult.

get_num_tokens(text: str) → int
    Calculate num tokens with tiktoken package.

get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int
    Get the number of tokens in the message.

json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode
    Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().

save(file_path: Union[pathlib.Path, str]) → None
    Save the LLM.
    Parameters
        file_path – Path to file to save the LLM to.
    Example:
        llm.save(file_path="path/llm.yaml")

classmethod update_forward_refs(**localns: Any) → None
    Try to update ForwardRefs on fields based on this Model, globalns and localns.

pydantic model langchain.llms.Petals

Wrapper around Petals Bloom models.

To use, you should have the petals python package installed, and the environment variable HUGGINGFACE_API_KEY set with your API key.

Any parameters that are valid to be passed to the call can be passed in, even if not explicitly saved on this class.

Example
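A minimal usage sketch (assuming the petals package is installed and HUGGINGFACE_API_KEY is set; the prompt and parameter values are illustrative, and the values shown match the field defaults documented below):

from langchain.llms import Petals

petals = Petals(
    model_name="bigscience/bloom-petals",  # the documented default model
    max_new_tokens=256,
    temperature=0.7,
)
print(petals("Tell me a joke."))  # __call__ runs the LLM on a single prompt
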
Validators
    build_extra » all fields
    raise_deprecation » all fields
    set_verbose » verbose
    validate_environment » all fields

field client: Any = None
    The client to use for the API calls.

field do_sample: bool = True
    Whether or not to use sampling; use greedy decoding otherwise.

field max_length: Optional[int] = None
    The maximum length of the sequence to be generated.

field max_new_tokens: int = 256
    The maximum number of new tokens to generate in the completion.

field model_kwargs: Dict[str, Any] [Optional]
    Holds any model parameters valid for create call not explicitly specified.

field model_name: str = 'bigscience/bloom-petals'
    The model to use.

field temperature: float = 0.7
    What sampling temperature to use.

field tokenizer: Any = None
    The tokenizer to use for the API calls.

field top_k: Optional[int] = None
    The number of highest probability vocabulary tokens to keep for top-k-filtering.

field top_p: float = 0.9
    The cumulative probability for top-p sampling.
__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str
    Check Cache and run the LLM on the given prompt and input.

async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Run the LLM on the given prompt and input.

async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Take in a list of prompt values and return an LLMResult.

classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model
    Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = 'allow' was set since it adds all passed values.

copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model
    Duplicate a model, optionally choose which fields to include, exclude and change.
    Parameters
        include – fields to include in new model
        exclude – fields to exclude from new model, as with values this takes precedence over include
        update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data
        deep – set to True to make a deep copy of the model
    Returns
        new model instance

dict(**kwargs: Any) → Dict
    Return a dictionary of the LLM.

generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Run the LLM on the given prompt and input.

generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Take in a list of prompt values and return an LLMResult.

get_num_tokens(text: str) → int
    Get the number of tokens present in the text.

get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int
    Get the number of tokens in the message.
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode
    Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().

save(file_path: Union[pathlib.Path, str]) → None
    Save the LLM.
    Parameters
        file_path – Path to file to save the LLM to.
    Example:
        llm.save(file_path="path/llm.yaml")

classmethod update_forward_refs(**localns: Any) → None
    Try to update ForwardRefs on fields based on this Model, globalns and localns.

pydantic model langchain.llms.PipelineAI

Wrapper around PipelineAI large language models.

To use, you should have the pipeline-ai python package installed, and the environment variable PIPELINE_API_KEY set with your API key.

Any parameters that are valid to be passed to the call can be passed in, even if not explicitly saved on this class.

Example
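A minimal usage sketch (assuming the pipeline-ai package is installed and PIPELINE_API_KEY is set; the pipeline key is a placeholder — use the id or tag of your own deployed pipeline, and the kwargs forwarding is per the field description below):

from langchain.llms import PipelineAI

llm = PipelineAI(
    pipeline_key="public/gpt-j:base",     # placeholder id/tag of a target pipeline
    pipeline_kwargs={"max_length": 100},  # extra parameters not saved on the class
)
print(llm("What is the capital of France?"))
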
Validators
    build_extra » all fields
    raise_deprecation » all fields
    set_verbose » verbose
    validate_environment » all fields

field pipeline_key: str = ''
    The id or tag of the target pipeline.

field pipeline_kwargs: Dict[str, Any] [Optional]
    Holds any pipeline parameters valid for create call not explicitly specified.

__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str
    Check Cache and run the LLM on the given prompt and input.

async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Run the LLM on the given prompt and input.

async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Take in a list of prompt values and return an LLMResult.

classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model
    Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = 'allow' was set since it adds all passed values.

copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model
    Duplicate a model, optionally choose which fields to include, exclude and change.
    Parameters
        include – fields to include in new model
        exclude – fields to exclude from new model, as with values this takes precedence over include
        update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data
        deep – set to True to make a deep copy of the model
    Returns
        new model instance

dict(**kwargs: Any) → Dict
    Return a dictionary of the LLM.

generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Run the LLM on the given prompt and input.

generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Take in a list of prompt values and return an LLMResult.

get_num_tokens(text: str) → int
    Get the number of tokens present in the text.

get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int
    Get the number of tokens in the message.

json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode
    Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().

save(file_path: Union[pathlib.Path, str]) → None
    Save the LLM.
    Parameters
        file_path – Path to file to save the LLM to.
    Example:
        llm.save(file_path="path/llm.yaml")

classmethod update_forward_refs(**localns: Any) → None
    Try to update ForwardRefs on fields based on this Model, globalns and localns.

pydantic model langchain.llms.PredictionGuard

Wrapper around Prediction Guard large language models.

To use, you should have the predictionguard python package installed, and the environment variable PREDICTIONGUARD_TOKEN set with your access token, or pass it as a named parameter to the constructor.

Example
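A minimal usage sketch (assuming the predictionguard package is installed and PREDICTIONGUARD_TOKEN is set, or the token is passed as a named constructor parameter; the values shown are the documented field defaults):

from langchain.llms import PredictionGuard

llm = PredictionGuard(
    name="default-text-gen",  # the documented default proxy name
    max_tokens=256,
    temperature=0.75,
)
print(llm("Tell me a joke."))
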
Validators
    raise_deprecation » all fields
    set_verbose » verbose
    validate_environment » all fields

field max_tokens: int = 256
    Denotes the number of tokens to predict per generation.

field name: Optional[str] = 'default-text-gen'
    Proxy name to use.

field temperature: float = 0.75
    A non-negative float that tunes the degree of randomness in generation.

__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str
    Check Cache and run the LLM on the given prompt and input.

async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Run the LLM on the given prompt and input.

async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Take in a list of prompt values and return an LLMResult.

classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model
    Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = 'allow' was set since it adds all passed values.

copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model
    Duplicate a model, optionally choose which fields to include, exclude and change.
    Parameters
        include – fields to include in new model
        exclude – fields to exclude from new model, as with values this takes precedence over include
        update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data
        deep – set to True to make a deep copy of the model
    Returns
        new model instance
dict(**kwargs: Any) → Dict
    Return a dictionary of the LLM.

generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Run the LLM on the given prompt and input.

generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Take in a list of prompt values and return an LLMResult.

get_num_tokens(text: str) → int
    Get the number of tokens present in the text.

get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int
    Get the number of tokens in the message.

json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode
    Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().

save(file_path: Union[pathlib.Path, str]) → None
    Save the LLM.
    Parameters
        file_path – Path to file to save the LLM to.
    Example:
        llm.save(file_path="path/llm.yaml")
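The file written by save() can be loaded back with load_llm from langchain.llms.loading; a hedged sketch of the round trip (OpenAI is used for illustration and assumes OPENAI_API_KEY is set — any of the wrappers on this page behave the same way):

from langchain.llms import OpenAI
from langchain.llms.loading import load_llm

llm = OpenAI(temperature=0.75)
llm.save("llm.yaml")             # serialize the LLM config to YAML (or .json)
restored = load_llm("llm.yaml")  # reconstruct an equivalent LLM from the file
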
classmethod update_forward_refs(**localns: Any) → None
    Try to update ForwardRefs on fields based on this Model, globalns and localns.

pydantic model langchain.llms.PromptLayerOpenAI

Wrapper around OpenAI large language models.

To use, you should have the openai and promptlayer python packages installed, and the environment variables OPENAI_API_KEY and PROMPTLAYER_API_KEY set with your OpenAI API key and promptlayer key respectively.

All parameters that can be passed to the OpenAI LLM can also be passed here. The PromptLayerOpenAI LLM adds two optional parameters:
    pl_tags – List of strings to tag the request with.
    return_pl_id – If True, the PromptLayer request ID will be returned in the generation_info field of the Generation object.

Example

from langchain.llms import PromptLayerOpenAI
openai = PromptLayerOpenAI(model_name="text-davinci-003")
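A hedged sketch of the two PromptLayer-specific parameters in use (the "pl_request_id" key name inside generation_info is an assumption, not confirmed by this page):

from langchain.llms import PromptLayerOpenAI

openai = PromptLayerOpenAI(
    model_name="text-davinci-003",
    pl_tags=["langchain", "demo"],  # tags attached to the PromptLayer request
    return_pl_id=True,              # surface the PromptLayer request ID
)
result = openai.generate(["Tell me a joke."])
generation = result.generations[0][0]
pl_request_id = generation.generation_info["pl_request_id"]  # assumed key name
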
Validators
    build_extra » all fields
    raise_deprecation » all fields
    set_verbose » verbose
    validate_environment » all fields

__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str
    Check Cache and run the LLM on the given prompt and input.

async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Run the LLM on the given prompt and input.

async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Take in a list of prompt values and return an LLMResult.

classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model
    Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = 'allow' was set since it adds all passed values.

copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model
    Duplicate a model, optionally choose which fields to include, exclude and change.
    Parameters
        include – fields to include in new model
        exclude – fields to exclude from new model, as with values this takes precedence over include
        update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data
        deep – set to True to make a deep copy of the model
    Returns
        new model instance

create_llm_result(choices: Any, prompts: List[str], token_usage: Dict[str, int]) → langchain.schema.LLMResult
    Create the LLMResult from the choices and prompts.
dict(**kwargs: Any) → Dict
    Return a dictionary of the LLM.

generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Run the LLM on the given prompt and input.

generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Take in a list of prompt values and return an LLMResult.

get_num_tokens(text: str) → int
    Calculate num tokens with tiktoken package.

get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int
    Get the number of tokens in the message.

get_sub_prompts(params: Dict[str, Any], prompts: List[str], stop: Optional[List[str]] = None) → List[List[str]]
    Get the sub prompts for llm call.

json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode
    Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
max_tokens_for_prompt(prompt: str) → int
    Calculate the maximum number of tokens possible to generate for a prompt.
    Parameters
        prompt – The prompt to pass into the model.
    Returns
        The maximum number of tokens to generate for a prompt.
    Example
        max_tokens = openai.max_tokens_for_prompt("Tell me a joke.")

modelname_to_contextsize(modelname: str) → int
    Calculate the maximum number of tokens possible to generate for a model.
    Parameters
        modelname – The modelname we want to know the context size for.
    Returns
        The maximum context size
    Example
        max_tokens = openai.modelname_to_contextsize("text-davinci-003")

prep_streaming_params(stop: Optional[List[str]] = None) → Dict[str, Any]
    Prepare the params for streaming.

save(file_path: Union[pathlib.Path, str]) → None
    Save the LLM.
    Parameters
        file_path – Path to file to save the LLM to.
    Example:
        llm.save(file_path="path/llm.yaml")

stream(prompt: str, stop: Optional[List[str]] = None) → Generator
    Call OpenAI with streaming flag and return the resulting generator.
    BETA: this is a beta feature while we figure out the right abstraction. Once that happens, this interface could change.
    Parameters
        prompt – The prompt to pass into the model.
        stop – Optional list of stop words to use when generating.
    Returns
        A generator representing the stream of tokens from OpenAI.
    Example
        generator = openai.stream("Tell me a joke.")
        for token in generator:
            yield token
classmethod update_forward_refs(**localns: Any) → None
    Try to update ForwardRefs on fields based on this Model, globalns and localns.

pydantic model langchain.llms.PromptLayerOpenAIChat

Wrapper around OpenAI large language models.

To use, you should have the openai and promptlayer python packages installed, and the environment variables OPENAI_API_KEY and PROMPTLAYER_API_KEY set with your OpenAI API key and promptlayer key respectively.

All parameters that can be passed to the OpenAIChat LLM can also be passed here. The PromptLayerOpenAIChat adds two optional parameters:
    pl_tags – List of strings to tag the request with.
    return_pl_id – If True, the PromptLayer request ID will be returned in the generation_info field of the Generation object.

Example

from langchain.llms import PromptLayerOpenAIChat
openaichat = PromptLayerOpenAIChat(model_name="gpt-3.5-turbo")

Validators
    build_extra » all fields
    raise_deprecation » all fields
    set_verbose » verbose
    validate_environment » all fields

field allowed_special: Union[Literal['all'], AbstractSet[str]] = {}
    Set of special tokens that are allowed.

field disallowed_special: Union[Literal['all'], Collection[str]] = 'all'
    Set of special tokens that are not allowed.

field max_retries: int = 6
    Maximum number of retries to make when generating.

field model_kwargs: Dict[str, Any] [Optional]
    Holds any model parameters valid for create call not explicitly specified.

field model_name: str = 'gpt-3.5-turbo'
    Model name to use.

field prefix_messages: List [Optional]
    Series of messages for Chat input.
field streaming: bool = False
    Whether to stream the results or not.

__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str
    Check Cache and run the LLM on the given prompt and input.

async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Run the LLM on the given prompt and input.

async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Take in a list of prompt values and return an LLMResult.

classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model
    Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = 'allow' was set since it adds all passed values.

copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model
    Duplicate a model, optionally choose which fields to include, exclude and change.
    Parameters
        include – fields to include in new model
        exclude – fields to exclude from new model, as with values this takes precedence over include
        update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data
        deep – set to True to make a deep copy of the model
    Returns
        new model instance

dict(**kwargs: Any) → Dict
    Return a dictionary of the LLM.

generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Run the LLM on the given prompt and input.

generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Take in a list of prompt values and return an LLMResult.

get_num_tokens(text: str) → int
    Calculate num tokens with tiktoken package.

get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int
    Get the number of tokens in the message.
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode
    Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().

save(file_path: Union[pathlib.Path, str]) → None
    Save the LLM.
    Parameters
        file_path – Path to file to save the LLM to.
    Example:
        llm.save(file_path="path/llm.yaml")

classmethod update_forward_refs(**localns: Any) → None
    Try to update ForwardRefs on fields based on this Model, globalns and localns.

pydantic model langchain.llms.RWKV

Wrapper around RWKV language models.

To use, you should have the rwkv python package installed, the pre-trained model file, and the model's config information.

Example

from langchain.llms import RWKV
model = RWKV(model="./models/rwkv-3b-fp16.bin", strategy="cpu fp32")

# Simplest invocation
response = model("Once upon a time, ")

Validators
    raise_deprecation » all fields
    set_verbose » verbose
    validate_environment » all fields
field CHUNK_LEN: int = 256
    Batch size for prompt processing.

field max_tokens_per_generation: int = 256
    Maximum number of tokens to generate.

field model: str [Required]
    Path to the pre-trained RWKV model file.

field penalty_alpha_frequency: float = 0.4
    Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.

field penalty_alpha_presence: float = 0.4
    Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.

field rwkv_verbose: bool = True
    Print debug information.

field strategy: str = 'cpu fp32'
    The strategy to use for loading the model (device and precision), e.g. 'cpu fp32'.

field temperature: float = 1.0
    The temperature to use for sampling.

field tokens_path: str [Required]
    Path to the RWKV tokens file.

field top_p: float = 0.5
    The top-p value to use for sampling.

__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str
    Check Cache and run the LLM on the given prompt and input.

async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Run the LLM on the given prompt and input.
async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Take in a list of prompt values and return an LLMResult.

classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model
    Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = 'allow' was set since it adds all passed values.

copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model
    Duplicate a model, optionally choose which fields to include, exclude and change.
    Parameters
        include – fields to include in new model
        exclude – fields to exclude from new model, as with values this takes precedence over include
        update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data
        deep – set to True to make a deep copy of the model
    Returns
        new model instance

dict(**kwargs: Any) → Dict
    Return a dictionary of the LLM.

generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Run the LLM on the given prompt and input.
generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Take in a list of prompt values and return an LLMResult.

get_num_tokens(text: str) → int
    Get the number of tokens present in the text.

get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int
    Get the number of tokens in the message.

json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode
    Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().

save(file_path: Union[pathlib.Path, str]) → None
    Save the LLM.
    Parameters
        file_path – Path to file to save the LLM to.
    Example:
        llm.save(file_path="path/llm.yaml")

classmethod update_forward_refs(**localns: Any) → None
    Try to update ForwardRefs on fields based on this Model, globalns and localns.
pydantic model langchain.llms.Replicate

Wrapper around Replicate models.

To use, you should have the replicate python package installed, and the environment variable REPLICATE_API_TOKEN set with your API token. You can find your token here: https://replicate.com/account

The model param is required, but any other model parameters can also be passed in with the format input={model_param: value, …}

Example
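A minimal usage sketch (assuming the replicate package is installed and REPLICATE_API_TOKEN is set; the model string is a placeholder — copy the real id:version from the model's page on replicate.com):

from langchain.llms import Replicate

llm = Replicate(
    model="owner/model:version",  # placeholder id:version string
    input={"temperature": 0.75},  # extra model params, in the format described above
)
print(llm("Which planet is closest to the sun?"))
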
Validators
    build_extra » all fields
    raise_deprecation » all fields
    set_verbose » verbose
    validate_environment » all fields

__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str
    Check Cache and run the LLM on the given prompt and input.

async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Run the LLM on the given prompt and input.

async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Take in a list of prompt values and return an LLMResult.

classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model
    Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = 'allow' was set since it adds all passed values.

copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model
    Duplicate a model, optionally choose which fields to include, exclude and change.
    Parameters
        include – fields to include in new model
        exclude – fields to exclude from new model, as with values this takes precedence over include
        update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data
        deep – set to True to make a deep copy of the model
    Returns
        new model instance

dict(**kwargs: Any) → Dict
    Return a dictionary of the LLM.

generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Run the LLM on the given prompt and input.

generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Take in a list of prompt values and return an LLMResult.

get_num_tokens(text: str) → int
    Get the number of tokens present in the text.

get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int
    Get the number of tokens in the message.
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode
    Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().

save(file_path: Union[pathlib.Path, str]) → None
    Save the LLM.
    Parameters
        file_path – Path to file to save the LLM to.
    Example:
        llm.save(file_path="path/llm.yaml")

classmethod update_forward_refs(**localns: Any) → None
    Try to update ForwardRefs on fields based on this Model, globalns and localns.

pydantic model langchain.llms.SagemakerEndpoint

Wrapper around custom Sagemaker Inference Endpoints.

To use, you must supply the endpoint name from your deployed Sagemaker model and the region where it is deployed.

To authenticate, the AWS client uses the following methods to automatically load credentials: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html

If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used.

Make sure the credentials / roles used have the required policies to access the Sagemaker endpoint.
See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html

Validators
    raise_deprecation » all fields
    set_verbose » verbose
    validate_environment » all fields

field content_handler: langchain.llms.sagemaker_endpoint.LLMContentHandler [Required]
    The content handler class that provides an input and output transform functions to handle formats between LLM and the endpoint.

field credentials_profile_name: Optional[str] = None
    The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html

field endpoint_kwargs: Optional[Dict] = None
    Optional attributes passed to the invoke_endpoint function. See the boto3 docs for more info: https://boto3.amazonaws.com/v1/documentation/api/latest/index.html

field endpoint_name: str = ''
    The name of the endpoint from the deployed Sagemaker model. Must be unique within an AWS Region.

field model_kwargs: Optional[Dict] = None
    Keyword arguments to pass to the model.

field region_name: str = ''
    The AWS region where the Sagemaker model is deployed, e.g. us-west-2.
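A hedged sketch of a content handler and endpoint (the endpoint name, region, and JSON wire format are illustrative; a real handler must match your deployed model's request/response schema):

import json
from langchain.llms import SagemakerEndpoint
from langchain.llms.sagemaker_endpoint import LLMContentHandler

class ContentHandler(LLMContentHandler):
    content_type = "application/json"
    accepts = "application/json"

    def transform_input(self, prompt: str, model_kwargs: dict) -> bytes:
        # Serialize the prompt and parameters into the body the endpoint expects.
        return json.dumps({"inputs": prompt, "parameters": model_kwargs}).encode("utf-8")

    def transform_output(self, output: bytes) -> str:
        # Parse the endpoint response back into plain generated text
        # (assumes a HuggingFace-style [{"generated_text": ...}] payload).
        return json.loads(output.read().decode("utf-8"))[0]["generated_text"]

llm = SagemakerEndpoint(
    endpoint_name="my-endpoint",         # placeholder endpoint name
    region_name="us-west-2",
    credentials_profile_name="default",  # optional; see the credentials notes above
    model_kwargs={"temperature": 1e-10},
    content_handler=ContentHandler(),
)
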
__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str
    Check Cache and run the LLM on the given prompt and input.

async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Run the LLM on the given prompt and input.

async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Take in a list of prompt values and return an LLMResult.

classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model
    Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = 'allow' was set since it adds all passed values.

copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model
    Duplicate a model, optionally choose which fields to include, exclude and change.
    Parameters
        include – fields to include in new model
        exclude – fields to exclude from new model, as with values this takes precedence over include
        update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data
        deep – set to True to make a deep copy of the model
    Returns
        new model instance
dict(**kwargs: Any) → Dict
    Return a dictionary of the LLM.

generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Run the LLM on the given prompt and input.

generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Take in a list of prompt values and return an LLMResult.

get_num_tokens(text: str) → int
    Get the number of tokens present in the text.

get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int
    Get the number of tokens in the message.

json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode
    Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().

save(file_path: Union[pathlib.Path, str]) → None
    Save the LLM.
    Parameters
        file_path – Path to file to save the LLM to.
    Example:
        llm.save(file_path="path/llm.yaml")
classmethod update_forward_refs(**localns: Any) → None
    Try to update ForwardRefs on fields based on this Model, globalns and localns.

pydantic model langchain.llms.SelfHostedHuggingFaceLLM

Wrapper around HuggingFace Pipeline API to run on self-hosted remote hardware.

Supported hardware includes auto-launched instances on AWS, GCP, Azure, and Lambda, as well as servers specified by IP address and SSH credentials (such as on-prem, or another cloud like Paperspace, Coreweave, etc.).

To use, you should have the runhouse python package installed.

Only supports text-generation and text2text-generation for now.

Example using from_model_id:

from langchain.llms import SelfHostedHuggingFaceLLM
import runhouse as rh

gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
hf = SelfHostedHuggingFaceLLM(
    model_id="google/flan-t5-large", task="text2text-generation",
    hardware=gpu
)

Example passing a fn that generates a pipeline (because the pipeline is not serializable):

from langchain.llms import SelfHostedHuggingFaceLLM
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import runhouse as rh

def get_pipeline():
    model_id = "gpt2"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id)
    pipe = pipeline(
        "text-generation", model=model, tokenizer=tokenizer
    )
    return pipe

hf = SelfHostedHuggingFaceLLM(
    model_load_fn=get_pipeline, model_id="gpt2", hardware=gpu)
"text-generation", model=model, tokenizer=tokenizer ) return pipe hf = SelfHostedHuggingFaceLLM( model_load_fn=get_pipeline, model_id="gpt2", hardware=gpu) Validators raise_deprecation Β» all fields set_verbose Β» verbose field device: int = 0# Device to use for inference. -1 for CPU, 0 for GPU, 1 for second GPU, etc. field hardware: Any = None# Remote hardware to send the inference function to. field inference_fn: Callable = <function _generate_text># Inference function to send to the remote hardware. field load_fn_kwargs: Optional[dict] = None# Key word arguments to pass to the model load function. field model_id: str = 'gpt2'# Hugging Face model_id to load the model. field model_kwargs: Optional[dict] = None# Key word arguments to pass to the model. field model_load_fn: Callable = <function _load_transformer># Function to load the model remotely on the server. field model_reqs: List[str] = ['./', 'transformers', 'torch']# Requirements to install on hardware to inference the model. field task: str = 'text-generation'# Hugging Face task (either β€œtext-generation” or β€œtext2text-generation”). __call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) β†’ str# Check Cache and run the LLM on the given prompt and input.
https://python.langchain.com/en/latest/reference/modules/llms.html
c31a3c84efc8-96
__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str
    Check Cache and run the LLM on the given prompt and input.

async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Run the LLM on the given prompt and input.

async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Take in a list of prompt values and return an LLMResult.

classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model
    Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = 'allow' was set since it adds all passed values.

copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model
    Duplicate a model, optionally choose which fields to include, exclude and change.
    Parameters
        include – fields to include in new model
        exclude – fields to exclude from new model, as with values this takes precedence over include
        update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data
        deep – set to True to make a deep copy of the model
    Returns
        new model instance
dict(**kwargs: Any) → Dict
    Return a dictionary of the LLM.

classmethod from_pipeline(pipeline: Any, hardware: Any, model_reqs: Optional[List[str]] = None, device: int = 0, **kwargs: Any) → langchain.llms.base.LLM
    Init the SelfHostedPipeline from a pipeline object or string.

generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Run the LLM on the given prompt and input.

generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult
    Take in a list of prompt values and return an LLMResult.

get_num_tokens(text: str) → int
    Get the number of tokens present in the text.

get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int
    Get the number of tokens in the message.

json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode
    Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
save(file_path: Union[pathlib.Path, str]) → None
    Save the LLM.
    Parameters
        file_path – Path to file to save the LLM to.
    Example:
        llm.save(file_path="path/llm.yaml")

classmethod update_forward_refs(**localns: Any) → None
    Try to update ForwardRefs on fields based on this Model, globalns and localns.

pydantic model langchain.llms.SelfHostedPipeline

Run model inference on self-hosted remote hardware.

Supported hardware includes auto-launched instances on AWS, GCP, Azure, and Lambda, as well as servers specified by IP address and SSH credentials (such as on-prem, or another cloud like Paperspace, Coreweave, etc.).

To use, you should have the runhouse python package installed.

Example for custom pipeline and inference functions:

from langchain.llms import SelfHostedPipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import runhouse as rh

def load_pipeline():
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    return pipeline(
        "text-generation", model=model, tokenizer=tokenizer,
        max_new_tokens=10
    )

def inference_fn(pipeline, prompt, stop=None):
    return pipeline(prompt)[0]["generated_text"]

gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
llm = SelfHostedPipeline(
    model_load_fn=load_pipeline, hardware=gpu,
    model_reqs=["./", "torch", "transformers"], inference_fn=inference_fn
)
Example for a <2GB model (can be serialized and sent directly to the server):

from langchain.llms import SelfHostedPipeline
import runhouse as rh

gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
my_model = ...
llm = SelfHostedPipeline.from_pipeline(
    pipeline=my_model,
    hardware=gpu,
    model_reqs=["./", "torch", "transformers"],
)

Example passing a model path for larger models:

from langchain.llms import SelfHostedPipeline
import runhouse as rh
import pickle
from transformers import pipeline

generator = pipeline(model="gpt2")
rh.blob(pickle.dumps(generator), path="models/pipeline.pkl"
       ).save().to(gpu, path="models")
llm = SelfHostedPipeline.from_pipeline(
    pipeline="models/pipeline.pkl",
    hardware=gpu,
    model_reqs=["./", "torch", "transformers"],
)

Validators
    raise_deprecation » all fields
    set_verbose » verbose

field hardware: Any = None
    Remote hardware to send the inference function to.

field inference_fn: Callable = <function _generate_text>
    Inference function to send to the remote hardware.

field load_fn_kwargs: Optional[dict] = None
    Keyword arguments to pass to the model load function.

field model_load_fn: Callable [Required]
    Function to load the model remotely on the server.

field model_reqs: List[str] = ['./', 'torch']
    Requirements to install on hardware to inference the model.
Requirements to install on hardware to inference the model. __call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) β†’ str# Check Cache and run the LLM on the given prompt and input. async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) β†’ langchain.schema.LLMResult# Run the LLM on the given prompt and input. async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) β†’ langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) β†’ Model# Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = β€˜allow’ was set since it adds all passed values copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) β†’ Model# Duplicate a model, optionally choose which fields to include, exclude and change. Parameters include – fields to include in new model exclude – fields to exclude from new model, as with values this takes precedence over include
update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data deep – set to True to make a deep copy of the model Returns new model instance dict(**kwargs: Any) → Dict# Return a dictionary of the LLM. classmethod from_pipeline(pipeline: Any, hardware: Any, model_reqs: Optional[List[str]] = None, device: int = 0, **kwargs: Any) → langchain.llms.base.LLM[source]# Init the SelfHostedPipeline from a pipeline object or string. generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int# Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int# Get the number of tokens in the message.
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode# Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps(). save(file_path: Union[pathlib.Path, str]) → None# Save the LLM. Parameters file_path – Path to file to save the LLM to. Example: llm.save(file_path="path/llm.yaml") classmethod update_forward_refs(**localns: Any) → None# Try to update ForwardRefs on fields based on this Model, globalns and localns. pydantic model langchain.llms.StochasticAI[source]# Wrapper around StochasticAI large language models. To use, you should have the environment variable STOCHASTICAI_API_KEY set with your API key. Example from langchain.llms import StochasticAI stochasticai = StochasticAI(api_url="") Validators build_extra » all fields raise_deprecation » all fields set_verbose » verbose validate_environment » all fields field api_url: str = ''# The API URL to use. field model_kwargs: Dict[str, Any] [Optional]# Holds any model parameters valid for create call not explicitly specified.
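The example above only constructs the wrapper. As a minimal, hedged sketch of actually calling it, assuming STOCHASTICAI_API_KEY is set and using a placeholder api_url (substitute the endpoint of a model you have deployed on StochasticAI):

import os
from langchain.llms import StochasticAI

os.environ["STOCHASTICAI_API_KEY"] = "..."  # your API key
# The api_url below is illustrative only; use your own model's endpoint.
llm = StochasticAI(api_url="https://api.stochastic.ai/.../your-model")
print(llm("Suggest a name for a coffee shop run by cats."))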
__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str# Check Cache and run the LLM on the given prompt and input. async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model# Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = 'allow' was set since it adds all passed values copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model# Duplicate a model, optionally choose which fields to include, exclude and change. Parameters include – fields to include in new model exclude – fields to exclude from new model, as with values this takes precedence over include
update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data deep – set to True to make a deep copy of the model Returns new model instance dict(**kwargs: Any) → Dict# Return a dictionary of the LLM. generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input. generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int# Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int# Get the number of tokens in the message. json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode#
Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps(). save(file_path: Union[pathlib.Path, str]) → None# Save the LLM. Parameters file_path – Path to file to save the LLM to. Example: llm.save(file_path="path/llm.yaml") classmethod update_forward_refs(**localns: Any) → None# Try to update ForwardRefs on fields based on this Model, globalns and localns. pydantic model langchain.llms.Writer[source]# Wrapper around Writer large language models. To use, you should have the environment variable WRITER_API_KEY set with your API key. Example from langchain import Writer writer = Writer(model_id="palmyra-base") Validators raise_deprecation » all fields set_verbose » verbose validate_environment » all fields field base_url: Optional[str] = None# Base url to use, if None decides based on model name. field beam_search_diversity_rate: float = 1.0# Only applies to beam search, i.e. when the beam width is >1. A higher value encourages beam search to return a more diverse set of candidates field beam_width: Optional[int] = None# The number of concurrent candidates to keep track of during beam search field length: int = 256# The maximum number of tokens to generate in the completion. field length_pentaly: float = 1.0# Only applies to beam search, i.e. when the beam width is >1. Larger values penalize long candidates more heavily, thus preferring shorter candidates field logprobs: bool = False# Whether to return log probabilities.
field model_id: str = 'palmyra-base'# Model name to use. field random_seed: int = 0# The model generates random results. Changing the random seed alone will produce a different response with similar characteristics. It is possible to reproduce results by fixing the random seed (assuming all other hyperparameters are also fixed) field repetition_penalty: float = 1.0# Penalizes repeated tokens according to frequency. field stop: Optional[List[str]] = None# Sequences when completion generation will stop field temperature: float = 1.0# What sampling temperature to use. field tokens_to_generate: int = 24# Max number of tokens to generate. field top_k: int = 1# The number of highest probability vocabulary tokens to keep for top-k-filtering. field top_p: float = 1.0# Total probability mass of tokens to consider at each step. __call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str# Check Cache and run the LLM on the given prompt and input. async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Run the LLM on the given prompt and input.
async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model# Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. Default values are respected, but no other validation is performed. Behaves as if Config.extra = 'allow' was set since it adds all passed values copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model# Duplicate a model, optionally choose which fields to include, exclude and change. Parameters include – fields to include in new model exclude – fields to exclude from new model, as with values this takes precedence over include update – values to change/add in the new model. Note: the data is not validated before creating the new model: you should trust this data deep – set to True to make a deep copy of the model Returns new model instance dict(**kwargs: Any) → Dict# Return a dictionary of the LLM. generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input. generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult# Take in a list of prompt values and return an LLMResult. get_num_tokens(text: str) → int# Get the number of tokens present in the text. get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int# Get the number of tokens in the message. json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode# Generate a JSON representation of the model, include and exclude arguments as per dict(). encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps(). save(file_path: Union[pathlib.Path, str]) → None# Save the LLM. Parameters file_path – Path to file to save the LLM to. Example: llm.save(file_path="path/llm.yaml") classmethod update_forward_refs(**localns: Any) → None# Try to update ForwardRefs on fields based on this Model, globalns and localns.
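A short usage sketch for the Writer wrapper documented above; the model_id matches the built-in default, the stop sequence is illustrative, and WRITER_API_KEY is assumed to be set in the environment:

from langchain import Writer

writer = Writer(model_id="palmyra-base", tokens_to_generate=64, temperature=0.7)
# generate() returns an LLMResult; each prompt maps to a list of generations.
result = writer.generate(["Write a tagline for a bakery."], stop=["\n\n"])
print(result.generations[0][0].text)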
Text Splitter# Functionality for splitting text. class langchain.text_splitter.CharacterTextSplitter(separator: str = '\n\n', **kwargs: Any)[source]# Implementation of splitting text that looks at characters. split_text(text: str) → List[str][source]# Split incoming text and return chunks. class langchain.text_splitter.LatexTextSplitter(**kwargs: Any)[source]# Attempts to split the text along Latex-formatted layout elements. class langchain.text_splitter.MarkdownTextSplitter(**kwargs: Any)[source]# Attempts to split the text along Markdown-formatted headings. class langchain.text_splitter.NLTKTextSplitter(separator: str = '\n\n', **kwargs: Any)[source]# Implementation of splitting text that looks at sentences using NLTK. split_text(text: str) → List[str][source]# Split incoming text and return chunks. class langchain.text_splitter.PythonCodeTextSplitter(**kwargs: Any)[source]# Attempts to split the text along Python syntax. class langchain.text_splitter.RecursiveCharacterTextSplitter(separators: Optional[List[str]] = None, **kwargs: Any)[source]# Implementation of splitting text that looks at characters. Recursively tries to split by different characters to find one that works. split_text(text: str) → List[str][source]# Split incoming text and return chunks. class langchain.text_splitter.SpacyTextSplitter(separator: str = '\n\n', pipeline: str = 'en_core_web_sm', **kwargs: Any)[source]# Implementation of splitting text that looks at sentences using Spacy. split_text(text: str) → List[str][source]# Split incoming text and return chunks.
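A minimal sketch of the character-based splitters above; chunk_size and chunk_overlap are inherited from the TextSplitter base class documented below, and the sample text is arbitrary:

from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter

text = "First paragraph.\n\nSecond paragraph.\n\n" + "A long third paragraph. " * 100

# Split on a single separator, keeping chunks under ~1000 characters.
splitter = CharacterTextSplitter(separator="\n\n", chunk_size=1000, chunk_overlap=100)
chunks = splitter.split_text(text)

# Fall back through several separators until each chunk fits.
recursive = RecursiveCharacterTextSplitter(
    separators=["\n\n", "\n", " ", ""], chunk_size=1000, chunk_overlap=100
)
chunks = recursive.split_text(text)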
class langchain.text_splitter.TextSplitter(chunk_size: int = 4000, chunk_overlap: int = 200, length_function: typing.Callable[[str], int] = <built-in function len>)[source]# Interface for splitting text into chunks. async atransform_documents(documents: Sequence[langchain.schema.Document], **kwargs: Any) → Sequence[langchain.schema.Document][source]# Asynchronously transform a sequence of documents by splitting them. create_documents(texts: List[str], metadatas: Optional[List[dict]] = None) → List[langchain.schema.Document][source]# Create documents from a list of texts. classmethod from_huggingface_tokenizer(tokenizer: Any, **kwargs: Any) → langchain.text_splitter.TextSplitter[source]# Text splitter that uses HuggingFace tokenizer to count length. classmethod from_tiktoken_encoder(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any) → langchain.text_splitter.TextSplitter[source]# Text splitter that uses tiktoken encoder to count length. split_documents(documents: List[langchain.schema.Document]) → List[langchain.schema.Document][source]# Split documents. abstract split_text(text: str) → List[str][source]# Split text into multiple components. transform_documents(documents: Sequence[langchain.schema.Document], **kwargs: Any) → Sequence[langchain.schema.Document][source]#
Transform sequence of documents by splitting them. class langchain.text_splitter.TokenTextSplitter(encoding_name: str = 'gpt2', model_name: Optional[str] = None, allowed_special: Union[Literal['all'], AbstractSet[str]] = {}, disallowed_special: Union[Literal['all'], Collection[str]] = 'all', **kwargs: Any)[source]# Implementation of splitting text that looks at tokens. split_text(text: str) → List[str][source]# Split incoming text and return chunks.
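To round out the page, a sketch of token-aware splitting with the classes above; the chunk sizes are arbitrary and the tiktoken package must be installed:

from langchain.text_splitter import RecursiveCharacterTextSplitter, TokenTextSplitter

long_text = "LangChain provides several text splitters. " * 200

# Split directly on token boundaries.
token_splitter = TokenTextSplitter(encoding_name="gpt2", chunk_size=512, chunk_overlap=32)
chunks = token_splitter.split_text(long_text)

# Split on characters, but measure chunk length in tokens.
splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
    model_name="gpt-3.5-turbo", chunk_size=512, chunk_overlap=32
)
docs = splitter.create_documents([long_text], metadatas=[{"source": "notes.txt"}])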
Agent Toolkits# Agent toolkits. pydantic model langchain.agents.agent_toolkits.FileManagementToolkit[source]# Toolkit for interacting with local files. field root_dir: Optional[str] = None# If specified, all file operations are made relative to root_dir. field selected_tools: Optional[List[str]] = None# If provided, only provide the selected tools. Defaults to all. get_tools() → List[langchain.tools.base.BaseTool][source]# Get the tools in the toolkit. pydantic model langchain.agents.agent_toolkits.JiraToolkit[source]# Jira Toolkit. field tools: List[langchain.tools.base.BaseTool] = []# classmethod from_jira_api_wrapper(jira_api_wrapper: langchain.utilities.jira.JiraAPIWrapper) → langchain.agents.agent_toolkits.jira.toolkit.JiraToolkit[source]# get_tools() → List[langchain.tools.base.BaseTool][source]# Get the tools in the toolkit. pydantic model langchain.agents.agent_toolkits.JsonToolkit[source]# Toolkit for interacting with a JSON spec. field spec: langchain.tools.json.tool.JsonSpec [Required]# get_tools() → List[langchain.tools.base.BaseTool][source]# Get the tools in the toolkit. pydantic model langchain.agents.agent_toolkits.NLAToolkit[source]# Natural Language API Toolkit Definition. field nla_tools: Sequence[langchain.agents.agent_toolkits.nla.tool.NLATool] [Required]# List of API Endpoint Tools.
classmethod from_llm_and_ai_plugin(llm: langchain.llms.base.BaseLLM, ai_plugin: langchain.tools.plugin.AIPlugin, requests: Optional[langchain.requests.Requests] = None, verbose: bool = False, **kwargs: Any) → langchain.agents.agent_toolkits.nla.toolkit.NLAToolkit[source]# Instantiate the toolkit from an AI plugin. classmethod from_llm_and_ai_plugin_url(llm: langchain.llms.base.BaseLLM, ai_plugin_url: str, requests: Optional[langchain.requests.Requests] = None, verbose: bool = False, **kwargs: Any) → langchain.agents.agent_toolkits.nla.toolkit.NLAToolkit[source]# Instantiate the toolkit from an AI plugin URL. classmethod from_llm_and_spec(llm: langchain.llms.base.BaseLLM, spec: langchain.tools.openapi.utils.openapi_utils.OpenAPISpec, requests: Optional[langchain.requests.Requests] = None, verbose: bool = False, **kwargs: Any) → langchain.agents.agent_toolkits.nla.toolkit.NLAToolkit[source]# Instantiate the toolkit by creating tools for each operation. classmethod from_llm_and_url(llm: langchain.llms.base.BaseLLM, open_api_url: str, requests: Optional[langchain.requests.Requests] = None, verbose: bool = False, **kwargs: Any) → langchain.agents.agent_toolkits.nla.toolkit.NLAToolkit[source]# Instantiate the toolkit from an OpenAPI Spec URL get_tools() → List[langchain.tools.base.BaseTool][source]# Get the tools for all the API operations. pydantic model langchain.agents.agent_toolkits.OpenAPIToolkit[source]# Toolkit for interacting with an OpenAPI API.
field json_agent: langchain.agents.agent.AgentExecutor [Required]# field requests_wrapper: langchain.requests.TextRequestsWrapper [Required]# classmethod from_llm(llm: langchain.llms.base.BaseLLM, json_spec: langchain.tools.json.tool.JsonSpec, requests_wrapper: langchain.requests.TextRequestsWrapper, **kwargs: Any) → langchain.agents.agent_toolkits.openapi.toolkit.OpenAPIToolkit[source]# Create json agent from llm, then initialize. get_tools() → List[langchain.tools.base.BaseTool][source]# Get the tools in the toolkit. pydantic model langchain.agents.agent_toolkits.PlayWrightBrowserToolkit[source]# Toolkit for web browser tools. field async_browser: Optional[AsyncBrowser] = None# field sync_browser: Optional[SyncBrowser] = None# classmethod from_browser(sync_browser: Optional[SyncBrowser] = None, async_browser: Optional[AsyncBrowser] = None) → PlayWrightBrowserToolkit[source]# Instantiate the toolkit. get_tools() → List[langchain.tools.base.BaseTool][source]# Get the tools in the toolkit. pydantic model langchain.agents.agent_toolkits.PowerBIToolkit[source]# Toolkit for interacting with PowerBI dataset. field callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None# field examples: Optional[str] = None# field llm: langchain.base_language.BaseLanguageModel [Required]# field powerbi: langchain.utilities.powerbi.PowerBIDataset [Required]# get_tools() → List[langchain.tools.base.BaseTool][source]# Get the tools in the toolkit. pydantic model langchain.agents.agent_toolkits.SQLDatabaseToolkit[source]# Toolkit for interacting with SQL databases.
field db: langchain.sql_database.SQLDatabase [Required]# field llm: langchain.base_language.BaseLanguageModel [Required]# get_tools() → List[langchain.tools.base.BaseTool][source]# Get the tools in the toolkit. property dialect: str# Return string representation of dialect to use. pydantic model langchain.agents.agent_toolkits.VectorStoreInfo[source]# Information about a vectorstore. field description: str [Required]# field name: str [Required]# field vectorstore: langchain.vectorstores.base.VectorStore [Required]# pydantic model langchain.agents.agent_toolkits.VectorStoreRouterToolkit[source]# Toolkit for routing between vectorstores. field llm: langchain.base_language.BaseLanguageModel [Optional]# field vectorstores: List[langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreInfo] [Required]# get_tools() → List[langchain.tools.base.BaseTool][source]# Get the tools in the toolkit. pydantic model langchain.agents.agent_toolkits.VectorStoreToolkit[source]# Toolkit for interacting with a vector store. field llm: langchain.base_language.BaseLanguageModel [Optional]# field vectorstore_info: langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreInfo [Required]# get_tools() → List[langchain.tools.base.BaseTool][source]# Get the tools in the toolkit. pydantic model langchain.agents.agent_toolkits.ZapierToolkit[source]# Zapier Toolkit. field tools: List[langchain.tools.base.BaseTool] = []#
classmethod from_zapier_nla_wrapper(zapier_nla_wrapper: langchain.utilities.zapier.ZapierNLAWrapper) → langchain.agents.agent_toolkits.zapier.toolkit.ZapierToolkit[source]# Create a toolkit from a ZapierNLAWrapper. get_tools() → List[langchain.tools.base.BaseTool][source]# Get the tools in the toolkit. langchain.agents.agent_toolkits.create_csv_agent(llm: langchain.llms.base.BaseLLM, path: str, pandas_kwargs: Optional[dict] = None, **kwargs: Any) → langchain.agents.agent.AgentExecutor[source]# Create a CSV agent by loading the file into a dataframe and using the pandas agent.
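A minimal sketch of create_csv_agent; the file name is hypothetical, and the OpenAI LLM stands in for any BaseLLM:

from langchain.agents.agent_toolkits import create_csv_agent
from langchain.llms import OpenAI

# "titanic.csv" is a stand-in; any local CSV file works.
agent = create_csv_agent(OpenAI(temperature=0), path="titanic.csv", verbose=True)
agent.run("How many rows are in the file?")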
langchain.agents.agent_toolkits.create_json_agent(llm: langchain.llms.base.BaseLLM, toolkit: langchain.agents.agent_toolkits.json.toolkit.JsonToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to interact with JSON.\nYour goal is to return a final answer by interacting with the JSON.\nYou have access to the following tools which help you learn more about the JSON you are interacting with.\nOnly use the below tools. Only use the information returned by the below tools to construct your final answer.\nDo not make up any information that is not contained in the JSON.\nYour input to the tools should be in the form of `data["key"][0]` where `data` is the JSON blob you are interacting with, and the syntax used is Python. \nYou should only use keys that you know for a fact exist. You must validate that a key exists by seeing it previously when calling `json_spec_list_keys`. \nIf you have not seen a key in one of those responses, you cannot use it.\nYou should only add one key
at a time to the path. You cannot add multiple keys at once.\nIf you encounter a "KeyError", go back to the previous key, look at the available keys, and try again.\n\nIf the question does not seem to be related to the JSON, just return "I don\'t know" as the answer.\nAlways begin your interaction with the `json_spec_list_keys` tool with input "data" to see what keys exist in the JSON.\n\nNote that sometimes the value at a given path is large. In this case, you will get an error "Value is a large dictionary, should explore its keys directly".\nIn this case, you should ALWAYS follow up by using the `json_spec_list_keys` tool to see what keys exist at that path.\nDo not simply refer the user to the JSON or a section of the JSON, as this is not a valid answer. Keep digging until you find the answer and explicitly return it.\n', suffix: str = 'Begin!"\n\nQuestion: {input}\nThought:
I should look at the keys that exist in data to see what I have access to\n{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', input_variables: Optional[List[str]] = None, verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]#
Construct a json agent from an LLM and tools.
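A hedged usage sketch for create_json_agent; the toy dictionary stands in for a real JSON blob, and JsonSpec's dict_ and max_value_length arguments are assumed from its documented interface:

from langchain.agents.agent_toolkits import JsonToolkit, create_json_agent
from langchain.llms import OpenAI
from langchain.tools.json.tool import JsonSpec

# Toy data; in practice this is often a parsed OpenAPI spec or config JSON.
data = {"people": [{"name": "Ada", "role": "engineer"}]}
toolkit = JsonToolkit(spec=JsonSpec(dict_=data, max_value_length=4000))
agent = create_json_agent(llm=OpenAI(temperature=0), toolkit=toolkit, verbose=True)
agent.run("What is the name of the first person?")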
langchain.agents.agent_toolkits.create_openapi_agent(llm: langchain.llms.base.BaseLLM, toolkit: langchain.agents.agent_toolkits.openapi.toolkit.OpenAPIToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = "You are an agent designed to answer questions by making web requests to an API given the openapi spec.\n\nIf the question does not seem related to the API, return I don't know. Do not make up an answer.\nOnly use information provided by the tools to construct your response.\n\nFirst, find the base URL needed to make the request.\n\nSecond, find the relevant paths needed to answer the question. Take note that, sometimes, you might need to make more than one request to more than one path to answer the question.\n\nThird, find the required parameters needed to make the request. For GET requests, these are usually URL parameters and for POST requests, these are request body parameters.\n\nFourth, make the requests needed to answer the question. Ensure that you are sending the correct parameters to the request by checking which parameters are required. For parameters with a
fixed set of values, please use the spec to look at which values are allowed.\n\nUse the exact parameter names as listed in the spec, do not make up any names or abbreviate the names of parameters.\nIf you get a not found error, ensure that you are using a path that actually exists in the spec.\n", suffix: str = 'Begin!\n\nQuestion: {input}\nThought: I should explore the spec to find the base url for the API.\n{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', input_variables: Optional[List[str]] = None, max_iterations: Optional[int] = 15,
max_execution_time: Optional[float] = None, early_stopping_method: str = 'force', verbose: bool = False, return_intermediate_steps: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]#
Construct an OpenAPI agent from an LLM and tools. langchain.agents.agent_toolkits.create_pandas_dataframe_agent(llm: langchain.llms.base.BaseLLM, df: Any, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = '\nYou are working with a pandas dataframe in Python. The name of the dataframe is `df`.\nYou should use the tools below to answer the question posed of you:', suffix: str = '\nThis is the result of `print(df.head())`:\n{df}\n\nBegin!\nQuestion: {input}\n{agent_scratchpad}', input_variables: Optional[List[str]] = None, verbose: bool = False, return_intermediate_steps: bool = False, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = 'force', agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]# Construct a pandas agent from an LLM and dataframe.
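A minimal sketch of the pandas agent; the dataframe contents are made up:

import pandas as pd
from langchain.agents.agent_toolkits import create_pandas_dataframe_agent
from langchain.llms import OpenAI

df = pd.DataFrame({"city": ["Oslo", "Lima"], "population": [709_000, 9_750_000]})
agent = create_pandas_dataframe_agent(OpenAI(temperature=0), df, verbose=True)
agent.run("Which city has the larger population?")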
langchain.agents.agent_toolkits.create_pbi_agent(llm: langchain.llms.base.BaseLLM, toolkit: Optional[langchain.agents.agent_toolkits.powerbi.toolkit.PowerBIToolkit], powerbi: Optional[langchain.utilities.powerbi.PowerBIDataset] = None, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to interact with a Power BI Dataset.\nGiven an input question, create a syntactically correct DAX query to run, then look at the results of the query and return the answer.\nUnless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.\nYou can order the results by a relevant column to return the most interesting examples in the database.\nNever query for all the columns from a specific table, only ask for a the few relevant columns given the question.\n\nYou have access to tools for interacting with the Power BI Dataset. Only use the below tools. Only use the information returned by the below tools to construct your final answer. Usually I should first ask which tables I have, then
how each table is defined and then ask the question to query tool to create a query for me and then I should ask the query tool to execute it, finally create a nice sentence that answers the question. If you receive an error back that mentions that the query was wrong try to phrase the question differently and get a new query from the question to query tool.\n\nIf the question does not seem related to the dataset, just return "I don\'t know" as the answer.\n', suffix: str = 'Begin!\n\nQuestion: {input}\nThought: I should first ask which tables I have, then how each table is defined and then ask the question to query tool to create a query for me and then I should ask the query tool to execute it, finally create a nice sentence that answers the question.\n{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to
take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', examples: Optional[str] = None, input_variables: Optional[List[str]] = None, top_k: int = 10, verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]#
Construct a Power BI agent from an LLM and tools.
langchain.agents.agent_toolkits.create_pbi_chat_agent(llm: langchain.chat_models.base.BaseChatModel, toolkit: Optional[langchain.agents.agent_toolkits.powerbi.toolkit.PowerBIToolkit], powerbi: Optional[langchain.utilities.powerbi.PowerBIDataset] = None, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, output_parser: Optional[langchain.agents.agent.AgentOutputParser] = None, prefix: str = 'Assistant is a large language model trained by OpenAI built to help users interact with a PowerBI Dataset.\n\nAssistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n\nAssistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a
wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics. \n\nGiven an input question, create a syntactically correct DAX query to run, then look at the results of the query and return the answer. Unless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results. You can order the results by a relevant column to return the most interesting examples in the database.\n\nOverall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n\nUsually I should first ask which tables I have, then how each table is defined and then ask the question
to query tool to create a query for me and then I should ask the query tool to execute it, finally create a complete sentence that answers the question. If you receive an error back that mentions that the query was wrong try to phrase the question differently and get a new query from the question to query tool.\n', suffix: str = "TOOLS\n------\nAssistant can ask the user to use tools to look up information that may be helpful in answering the users original question. The tools the human can use are:\n\n{{tools}}\n\n{format_instructions}\n\nUSER'S INPUT\n--------------------\nHere is the user's input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):\n\n{{{{input}}}}\n", examples: Optional[str] = None, input_variables: Optional[List[str]] = None, memory: Optional[langchain.memory.chat_memory.BaseChatMemory] = None, top_k: int = 10, verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str,
Any]) → langchain.agents.agent.AgentExecutor[source]#
Construct a Power BI agent from a chat LLM and tools. If you supply only a toolkit and no Power BI dataset, the same LLM is used for both. langchain.agents.agent_toolkits.create_python_agent(llm: langchain.llms.base.BaseLLM, tool: langchain.tools.python.tool.PythonREPLTool, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, verbose: bool = False, prefix: str = 'You are an agent designed to write and execute python code to answer questions.\nYou have access to a python REPL, which you can use to execute python code.\nIf you get an error, debug your code and try again.\nOnly use the output of your code to answer the question. \nYou might know the answer without running any code, but you should still run the code to get the answer.\nIf it does not seem like you can write code to answer the question, just return "I don\'t know" as the answer.\n', agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]# Construct a Python agent from an LLM and tool.
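A minimal sketch of create_python_agent using the PythonREPLTool named in the signature above; the question is illustrative:

from langchain.agents.agent_toolkits import create_python_agent
from langchain.llms import OpenAI
from langchain.tools.python.tool import PythonREPLTool

agent = create_python_agent(llm=OpenAI(temperature=0), tool=PythonREPLTool(), verbose=True)
agent.run("What is the 10th Fibonacci number?")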
langchain.agents.agent_toolkits.create_sql_agent(llm: langchain.llms.base.BaseLLM, toolkit: langchain.agents.agent_toolkits.sql.toolkit.SQLDatabaseToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to interact with a SQL database.\nGiven an input question, create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer.\nUnless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.\nYou can order the results by a relevant column to return the most interesting examples in the database.\nNever query for all the columns from a specific table, only ask for the relevant columns given the question.\nYou have access to tools for interacting with the database.\nOnly use the below tools. Only use the information returned by the below tools to construct your final answer.\nYou MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again.\n\nDO
NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.\n\nIf the question does not seem related to the database, just return "I don\'t know" as the answer.\n', suffix: str = 'Begin!\n\nQuestion: {input}\nThought: I should look at the tables in the database to see what I can query.\n{agent_scratchpad}', format_instructions: str = 'Use the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question', input_variables: Optional[List[str]] = None, top_k: int = 10, max_iterations: Optional[int] = 15, max_execution_time: Optional[float] = None, early_stopping_method: str = 'force', verbose: bool =
False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]#
Construct a SQL agent from an LLM and tools. langchain.agents.agent_toolkits.create_vectorstore_agent(llm: langchain.llms.base.BaseLLM, toolkit: langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to answer questions about sets of documents.\nYou have access to tools for interacting with the documents, and the inputs to the tools are questions.\nSometimes, you will be asked to provide sources for your questions, in which case you should use the appropriate tool to do so.\nIf the question does not seem relevant to any of the tools provided, just return "I don\'t know" as the answer.\n', verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]# Construct a vectorstore agent from an LLM and tools. langchain.agents.agent_toolkits.create_vectorstore_router_agent(llm: langchain.llms.base.BaseLLM, toolkit: langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreRouterToolkit, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, prefix: str = 'You are an agent designed to answer questions.\nYou have access to tools for interacting with different sources, and the inputs to the tools are questions.\nYour main task is to decide which of the tools is relevant for answering question at hand.\nFor complex questions, you can break the question down into sub questions and use tools to answers the sub questions.\n', verbose: bool = False, agent_executor_kwargs: Optional[Dict[str, Any]] = None, **kwargs: Dict[str, Any]) → langchain.agents.agent.AgentExecutor[source]#
Construct a vectorstore router agent from an LLM and tools.
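Tying the page together, a hedged sketch that wires the SQLDatabaseToolkit documented above into create_sql_agent; the SQLite URI and question are placeholders:

from langchain.agents.agent_toolkits import SQLDatabaseToolkit, create_sql_agent
from langchain.llms import OpenAI
from langchain.sql_database import SQLDatabase

db = SQLDatabase.from_uri("sqlite:///chinook.db")  # hypothetical database file
llm = OpenAI(temperature=0)
toolkit = SQLDatabaseToolkit(db=db, llm=llm)
agent = create_sql_agent(llm=llm, toolkit=toolkit, top_k=10, verbose=True)
agent.run("How many employees are there?")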
Document Transformers# Transform documents. pydantic model langchain.document_transformers.EmbeddingsRedundantFilter[source]# Filter that drops redundant documents by comparing their embeddings. field embeddings: langchain.embeddings.base.Embeddings [Required]# Embeddings to use for embedding document contents. field similarity_fn: Callable = <function cosine_similarity># Similarity function for comparing documents. Function expected to take as input two matrices (List[List[float]]) and return a matrix of scores where higher values indicate greater similarity. field similarity_threshold: float = 0.95# Threshold for determining when two documents are similar enough to be considered redundant. async atransform_documents(documents: Sequence[langchain.schema.Document], **kwargs: Any) → Sequence[langchain.schema.Document][source]# Asynchronously transform a list of documents. transform_documents(documents: Sequence[langchain.schema.Document], **kwargs: Any) → Sequence[langchain.schema.Document][source]# Filter down documents. langchain.document_transformers.get_stateful_documents(documents: Sequence[langchain.schema.Document]) → Sequence[langchain.document_transformers._DocumentWithState][source]#
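A minimal sketch of EmbeddingsRedundantFilter; OpenAIEmbeddings stands in for any Embeddings implementation, and the documents are toy data:

from langchain.document_transformers import EmbeddingsRedundantFilter
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import Document

docs = [
    Document(page_content="LangChain ships several text splitters."),
    Document(page_content="Several text splitters are shipped by LangChain."),
    Document(page_content="DuckDB is an in-process analytics database."),
]
redundant_filter = EmbeddingsRedundantFilter(
    embeddings=OpenAIEmbeddings(), similarity_threshold=0.95
)
unique_docs = redundant_filter.transform_documents(docs)  # near-duplicates dropped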
Document Loaders# All different types of document loaders. class langchain.document_loaders.AZLyricsLoader(web_path: Union[str, List[str]], header_template: Optional[dict] = None)[source]# Loader that loads AZLyrics webpages. load() → List[langchain.schema.Document][source]# Load webpage. web_paths: List[str]# class langchain.document_loaders.AirbyteJSONLoader(file_path: str)[source]# Loader that loads local airbyte json files. load() → List[langchain.schema.Document][source]# Load file. pydantic model langchain.document_loaders.ApifyDatasetLoader[source]# Logic for loading documents from Apify datasets. field apify_client: Any = None# field dataset_id: str [Required]# The ID of the dataset on the Apify platform. field dataset_mapping_function: Callable[[Dict], langchain.schema.Document] [Required]# A custom function that takes a single dictionary (an Apify dataset item) and converts it to an instance of the Document class. load() → List[langchain.schema.Document][source]# Load documents. class langchain.document_loaders.ArxivLoader(query: str, load_max_docs: Optional[int] = 100, load_all_available_meta: Optional[bool] = False)[source]# Loads a query result from arxiv.org into a list of Documents. Each entry in the result becomes one Document. The loader converts the original PDF format into text. load() → List[langchain.schema.Document][source]# Load data into document objects. class langchain.document_loaders.AzureBlobStorageContainerLoader(conn_str: str, container: str, prefix: str = '')[source]# Loading logic for loading documents from Azure Blob Storage.
load() → List[langchain.schema.Document][source]# Load documents. class langchain.document_loaders.AzureBlobStorageFileLoader(conn_str: str, container: str, blob_name: str)[source]# Loading logic for loading documents from Azure Blob Storage. load() → List[langchain.schema.Document][source]# Load documents. class langchain.document_loaders.BSHTMLLoader(file_path: str, open_encoding: Optional[str] = None, bs_kwargs: Optional[dict] = None, get_text_separator: str = '')[source]# Loader that uses beautiful soup to parse HTML files. load() → List[langchain.schema.Document][source]# Load data into document objects. class langchain.document_loaders.BigQueryLoader(query: str, project: Optional[str] = None, page_content_columns: Optional[List[str]] = None, metadata_columns: Optional[List[str]] = None)[source]# Loads a query result from BigQuery into a list of documents. Each document represents one row of the result. The page_content_columns are written into the page_content of the document. The metadata_columns are written into the metadata of the document. By default, all columns are written into the page_content and none into the metadata. load() → List[langchain.schema.Document][source]# Load data into document objects. class langchain.document_loaders.BiliBiliLoader(video_urls: List[str])[source]# Loader that loads bilibili transcripts. load() → List[langchain.schema.Document][source]# Load from bilibili url.
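A minimal sketch of the BigQueryLoader column mapping described above; the project and table names are hypothetical, and the google-cloud-bigquery client library is assumed to be installed and authenticated:

from langchain.document_loaders import BigQueryLoader

query = "SELECT title, abstract, year FROM `my-project.papers.metadata`"  # hypothetical table
loader = BigQueryLoader(
    query,
    page_content_columns=["title", "abstract"],  # written into page_content
    metadata_columns=["year"],                   # written into metadata
)
docs = loader.load()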
class langchain.document_loaders.BlackboardLoader(blackboard_course_url: str, bbrouter: str, load_all_recursively: bool = True, basic_auth: Optional[Tuple[str, str]] = None, cookies: Optional[dict] = None)[source]# Loader that loads all documents from a Blackboard course. This loader is not compatible with all Blackboard courses. It is only compatible with courses that use the new Blackboard interface. To use this loader, you must have the BbRouter cookie. You can get this cookie by logging into the course and then copying the value of the BbRouter cookie from the browser's developer tools. Example

from langchain.document_loaders import BlackboardLoader

loader = BlackboardLoader(
    blackboard_course_url="https://blackboard.example.com/webapps/blackboard/execute/announcement?method=search&context=course_entry&course_id=_123456_1",
    bbrouter="expires:12345...",
)
documents = loader.load()

base_url: str# check_bs4() → None[source]# Check if BeautifulSoup4 is installed. Raises ImportError – If BeautifulSoup4 is not installed. download(path: str) → None[source]# Download a file from a url. Parameters path – Path to the file. folder_path: str# load() → List[langchain.schema.Document][source]# Load data into document objects. Returns List of documents. load_all_recursively: bool# parse_filename(url: str) → str[source]# Parse the filename from a url. Parameters url – Url to parse the filename from. Returns The filename.
class langchain.document_loaders.BlockchainDocumentLoader(contract_address: str, blockchainType: langchain.document_loaders.blockchain.BlockchainType = BlockchainType.ETH_MAINNET, api_key: str = 'docs-demo', startToken: str = '')[source]# Loads elements from a blockchain smart contract into Langchain documents. The supported blockchains are: Ethereum mainnet, Ethereum Goerli testnet, Polygon mainnet, and Polygon Mumbai testnet. If no BlockchainType is specified, the default is Ethereum mainnet. The Loader uses the Alchemy API to interact with the blockchain. The API returns 100 NFTs per request and can be paginated using the startToken parameter. The ALCHEMY_API_KEY environment variable must be set to use this loader. Future versions of this loader can: Support additional Alchemy APIs (e.g. getTransactions, etc.) Support additional blockchain APIs (e.g. Infura, Opensea, etc.) load() → List[langchain.schema.Document][source]# Load data into document objects. class langchain.document_loaders.CSVLoader(file_path: str, source_column: Optional[str] = None, csv_args: Optional[Dict] = None, encoding: Optional[str] = None)[source]# Loads a CSV file into a list of documents. Each document represents one row of the CSV file. Every row is converted into a key/value pair and outputted to a new line in the document's page_content. The source for each document loaded from csv is set to the value of the file_path argument for all documents by default. You can override this by setting the source_column argument to the name of a column in the CSV file.
The source of each document will then be set to the value of the column with the name specified in source_column. Output Example: column1: value1 column2: value2 column3: value3 load() → List[langchain.schema.Document][source]# Load data into document objects. class langchain.document_loaders.ChatGPTLoader(log_file: str, num_logs: int = - 1)[source]# Loader that loads conversations from exported ChatGPT data. load() → List[langchain.schema.Document][source]# Load data into document objects. class langchain.document_loaders.CoNLLULoader(file_path: str)[source]# Load CoNLL-U files. load() → List[langchain.schema.Document][source]# Load from file path. class langchain.document_loaders.CollegeConfidentialLoader(web_path: Union[str, List[str]], header_template: Optional[dict] = None)[source]# Loader that loads College Confidential webpages. load() → List[langchain.schema.Document][source]# Load webpage. web_paths: List[str]# class langchain.document_loaders.ConfluenceLoader(url: str, api_key: Optional[str] = None, username: Optional[str] = None, oauth2: Optional[dict] = None, cloud: Optional[bool] = True, number_of_retries: Optional[int] = 3, min_retry_seconds: Optional[int] = 2, max_retry_seconds: Optional[int] = 10, confluence_kwargs: Optional[dict] = None)[source]# Load Confluence pages. Port of https://llamahub.ai/l/confluence This currently supports both username/api_key and Oauth2 login.
Specify a list page_ids and/or space_key to load the corresponding pages into Document objects; if both are specified the union of both sets will be returned. You can also specify a boolean include_attachments to include attachments, this is set to False by default, if set to True all attachments will be downloaded and ConfluenceReader will extract the text from the attachments and add it to the Document object. Currently supported attachment types are: PDF, PNG, JPEG/JPG, SVG, Word and Excel. Hint: space_key and page_id can both be found in the URL of a page in Confluence - https://yoursite.atlassian.com/wiki/spaces/<space_key>/pages/<page_id> Example

from langchain.document_loaders import ConfluenceLoader

loader = ConfluenceLoader(
    url="https://yoursite.atlassian.com/wiki",
    username="me",
    api_key="12345"
)
documents = loader.load(space_key="SPACE", limit=50)

Parameters url (str) – base URL of the Confluence site api_key (str, optional) – API key used to authenticate, defaults to None username (str, optional) – username used to authenticate, defaults to None oauth2 (dict, optional) – OAuth2 credentials used instead of username/api_key, defaults to {} cloud (bool, optional) – whether the instance is Confluence Cloud, defaults to True number_of_retries (Optional[int], optional) – How many times to retry, defaults to 3 min_retry_seconds (Optional[int], optional) – defaults to 2 max_retry_seconds (Optional[int], optional) – defaults to 10 confluence_kwargs (dict, optional) – additional kwargs to initialize confluence with Raises ValueError – Errors while validating input ImportError – Required dependencies not installed. is_public_page(page: dict) → bool[source]#
Check if a page is publicly accessible. load(space_key: Optional[str] = None, page_ids: Optional[List[str]] = None, label: Optional[str] = None, cql: Optional[str] = None, include_restricted_content: bool = False, include_archived_content: bool = False, include_attachments: bool = False, include_comments: bool = False, limit: Optional[int] = 50, max_pages: Optional[int] = 1000) → List[langchain.schema.Document][source]# Parameters space_key (Optional[str], optional) – Space key retrieved from a confluence URL, defaults to None page_ids (Optional[List[str]], optional) – List of specific page IDs to load, defaults to None label (Optional[str], optional) – Get all pages with this label, defaults to None cql (Optional[str], optional) – CQL Expression, defaults to None include_restricted_content (bool, optional) – defaults to False include_archived_content (bool, optional) – Whether to include archived content, defaults to False include_attachments (bool, optional) – defaults to False include_comments (bool, optional) – defaults to False limit (int, optional) – Maximum number of pages to retrieve per request, defaults to 50 max_pages (int, optional) – Maximum number of pages to retrieve in total, defaults 1000 Raises ValueError – Errors while validating input ImportError – Required dependencies not installed. Returns A list of loaded Documents Return type List[Document] paginate_request(retrieval_method: Callable, **kwargs: Any) → List[source]# Paginate the various methods to retrieve groups of pages. Unfortunately, due to page size, sometimes the Confluence API doesn't match the limit value. If limit is >100 confluence
Unfortunately, due to page size, the Confluence API sometimes does not match the limit value: if limit is >100, Confluence seems to cap the response at 100. Also, because of the Atlassian Python package, we don't get the "next" values from the "_links" key; it only returns the value from the "results" key. So here the pagination starts from 0 and goes until max_pages, retrieving limit pages with each request. We have to manually check whether there are more docs, based on the length of the returned list of pages, rather than just checking for the presence of a "next" key in the response, as this page would have you do:
https://developer.atlassian.com/server/confluence/pagination-in-the-rest-api/
Parameters
retrieval_method (callable) – Function used to retrieve docs
Returns
List of documents
Return type
List
process_attachment(page_id: str) → List[str][source]#
process_doc(link: str) → str[source]#
process_image(link: str) → str[source]#
process_page(page: dict, include_attachments: bool, include_comments: bool) → langchain.schema.Document[source]#
process_pages(pages: List[dict], include_restricted_content: bool, include_attachments: bool, include_comments: bool) → List[langchain.schema.Document][source]#
Process a list of pages into a list of documents.
process_pdf(link: str) → str[source]#
process_svg(link: str) → str[source]#
process_xls(link: str) → str[source]#
static validate_init_args(url: Optional[str] = None, api_key: Optional[str] = None, username: Optional[str] = None, oauth2: Optional[dict] = None) → Optional[List][source]#
Validates proper combinations of init arguments.
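A short usage sketch of the load() filters documented above; the page IDs and CQL expression are illustrative placeholders, not values from this reference:
from langchain.document_loaders import ConfluenceLoader

loader = ConfluenceLoader(
    url="https://yoursite.atlassian.com/wiki",  # illustrative site URL
    username="me",
    api_key="12345",
)
# Load two specific pages together with their attachments
docs = loader.load(page_ids=["123456", "7891011"], include_attachments=True)
# Or select pages with a CQL expression, capped at 200 pages in total
docs = loader.load(cql="type=page and label=documentation", max_pages=200)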
class langchain.document_loaders.DataFrameLoader(data_frame: Any, page_content_column: str = 'text')[source]#
Load Pandas DataFrames.
load() → List[langchain.schema.Document][source]#
Load from the dataframe.
class langchain.document_loaders.DiffbotLoader(api_token: str, urls: List[str], continue_on_failure: bool = True)[source]#
Loader that loads Diffbot JSON output.
load() → List[langchain.schema.Document][source]#
Extract text from Diffbot on all the URLs and return Document instances.
class langchain.document_loaders.DirectoryLoader(path: str, glob: str = '**/[!.]*', silent_errors: bool = False, load_hidden: bool = False, loader_cls: typing.Union[typing.Type[langchain.document_loaders.unstructured.UnstructuredFileLoader], typing.Type[langchain.document_loaders.text.TextLoader], typing.Type[langchain.document_loaders.html_bs.BSHTMLLoader]] = <class 'langchain.document_loaders.unstructured.UnstructuredFileLoader'>, loader_kwargs: typing.Optional[dict] = None, recursive: bool = False, show_progress: bool = False)[source]#
Loading logic for loading documents from a directory.
load() → List[langchain.schema.Document][source]#
Load documents.
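A minimal DirectoryLoader sketch; the path and glob are illustrative assumptions, and TextLoader (documented later in this reference) is used as the per-file loader:
from langchain.document_loaders import DirectoryLoader, TextLoader

# Pick up every .md file under docs/ and parse each one with TextLoader
loader = DirectoryLoader(
    "docs/",                 # illustrative path
    glob="**/*.md",
    loader_cls=TextLoader,
    show_progress=True,      # progress bar (may require tqdm)
)
docs = loader.load()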
class langchain.document_loaders.DiscordChatLoader(chat_log: pd.DataFrame, user_id_col: str = 'ID')[source]#
Load Discord chat logs.
load() → List[langchain.schema.Document][source]#
Load all chat messages.
class langchain.document_loaders.Docx2txtLoader(file_path: str)[source]#
Loads a DOCX with docx2txt and chunks at character level.
Defaults to checking for a local file, but if the file is a web path, it will download it to a temporary file, use that, and then clean up the temporary file after completion.
load() → List[langchain.schema.Document][source]#
Load given path as single page.
class langchain.document_loaders.DuckDBLoader(query: str, database: str = ':memory:', read_only: bool = False, config: Optional[Dict[str, str]] = None, page_content_columns: Optional[List[str]] = None, metadata_columns: Optional[List[str]] = None)[source]#
Loads a query result from DuckDB into a list of documents.
Each document represents one row of the result. The page_content_columns are written into the page_content of the document, and the metadata_columns are written into the metadata of the document. By default, all columns are written into the page_content and none into the metadata.
load() → List[langchain.schema.Document][source]#
Load data into document objects.
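A hedged DuckDBLoader sketch; the database path, table and column names are illustrative assumptions:
from langchain.document_loaders import DuckDBLoader

loader = DuckDBLoader(
    "SELECT title, body, author FROM posts",  # illustrative query
    database="data/blog.duckdb",              # illustrative path
    page_content_columns=["title", "body"],   # written into page_content
    metadata_columns=["author"],              # written into metadata
)
docs = loader.load()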
class langchain.document_loaders.EverNoteLoader(file_path: str)[source]#
Loader to load in EverNote files.
load() → List[langchain.schema.Document][source]#
Load document from EverNote file.
class langchain.document_loaders.FacebookChatLoader(path: str)[source]#
Loader that loads Facebook messages json directory dump.
load() → List[langchain.schema.Document][source]#
Load documents.
class langchain.document_loaders.GCSDirectoryLoader(project_name: str, bucket: str, prefix: str = '')[source]#
Loading logic for loading documents from GCS.
load() → List[langchain.schema.Document][source]#
Load documents.
class langchain.document_loaders.GCSFileLoader(project_name: str, bucket: str, blob: str)[source]#
Loading logic for loading documents from GCS.
load() → List[langchain.schema.Document][source]#
Load documents.
class langchain.document_loaders.GitLoader(repo_path: str, clone_url: Optional[str] = None, branch: Optional[str] = 'main', file_filter: Optional[Callable[[str], bool]] = None)[source]#
Loads files from a Git repository into a list of documents.
The repository can be local on disk, available at repo_path, or remote at clone_url, in which case it will be cloned to repo_path. Currently, only text files are supported.
Each document represents one file in the repository. The path points to the local Git repository, and the branch specifies the branch to load files from. By default, it loads from the main branch.
load() → List[langchain.schema.Document][source]#
Load data into document objects.
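A GitLoader sketch along the lines of the description above; the clone URL and local path are illustrative, and the file_filter shown keeps only Python files:
from langchain.document_loaders import GitLoader

loader = GitLoader(
    repo_path="./example_data/test_repo",               # illustrative path
    clone_url="https://github.com/your-org/your-repo",  # illustrative URL
    branch="main",
    file_filter=lambda file_path: file_path.endswith(".py"),
)
docs = loader.load()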
class langchain.document_loaders.GitbookLoader(web_page: str, load_all_paths: bool = False, base_url: Optional[str] = None, content_selector: str = 'main')[source]#
Load GitBook data.
Load from either a single page, or load all (relative) paths in the navbar.
load() → List[langchain.schema.Document][source]#
Fetch text from one single GitBook page.
web_paths: List[str]#
class langchain.document_loaders.GoogleApiClient(credentials_path: pathlib.Path = PosixPath('/home/docs/.credentials/credentials.json'), service_account_path: pathlib.Path = PosixPath('/home/docs/.credentials/credentials.json'), token_path: pathlib.Path = PosixPath('/home/docs/.credentials/token.json'))[source]#
A generic Google API client.
To use, you should have the google_auth_oauthlib, youtube_transcript_api and google python packages installed. As the Google API expects credentials, you need to set up a Google account and register your service:
https://developers.google.com/docs/api/quickstart/python
Example
from pathlib import Path
from langchain.document_loaders import GoogleApiClient
google_api_client = GoogleApiClient(
    service_account_path=Path("path_to_your_sec_file.json")
)
credentials_path: pathlib.Path = PosixPath('/home/docs/.credentials/credentials.json')#
service_account_path: pathlib.Path = PosixPath('/home/docs/.credentials/credentials.json')#
token_path: pathlib.Path = PosixPath('/home/docs/.credentials/token.json')#
classmethod validate_channel_or_videoIds_is_set(values: Dict[str, Any]) → Dict[str, Any][source]#
Validate that either folder_id or document_ids is set, but not both.
class langchain.document_loaders.GoogleApiYoutubeLoader(google_api_client: langchain.document_loaders.youtube.GoogleApiClient, channel_name: Optional[str] = None, video_ids: Optional[List[str]] = None, add_video_info: bool = True, captions_language: str = 'en', continue_on_failure: bool = False)[source]#
Loader that loads all videos from a channel.
To use, you should have the googleapiclient and youtube_transcript_api python packages installed. As the service needs a google_api_client, you first have to initialize the GoogleApiClient. Additionally, you have to provide either a channel name or a list of video ids:
https://developers.google.com/docs/api/quickstart/python
Example
from pathlib import Path
from langchain.document_loaders import GoogleApiClient
from langchain.document_loaders import GoogleApiYoutubeLoader
google_api_client = GoogleApiClient(
    service_account_path=Path("path_to_your_sec_file.json")
)
loader = GoogleApiYoutubeLoader(
    google_api_client=google_api_client,
    channel_name="CodeAesthetic"
)
loader.load()
add_video_info: bool = True#
captions_language: str = 'en'#
channel_name: Optional[str] = None#
continue_on_failure: bool = False#
google_api_client: langchain.document_loaders.youtube.GoogleApiClient#
load() → List[langchain.schema.Document][source]#
Load documents.
classmethod validate_channel_or_videoIds_is_set(values: Dict[str, Any]) → Dict[str, Any][source]#
Validate that either folder_id or document_ids is set, but not both.
video_ids: Optional[List[str]] = None#
pydantic model langchain.document_loaders.GoogleDriveLoader[source]#
Loader that loads Google Docs from Google Drive.
Validators
validate_credentials_path » credentials_path
validate_folder_id_or_document_ids » all fields
field credentials_path: pathlib.Path = PosixPath('/home/docs/.credentials/credentials.json')#
field document_ids: Optional[List[str]] = None#
field file_ids: Optional[List[str]] = None#
field folder_id: Optional[str] = None#
field recursive: bool = False#
field service_account_key: pathlib.Path = PosixPath('/home/docs/.credentials/keys.json')#
field token_path: pathlib.Path = PosixPath('/home/docs/.credentials/token.json')#
load() → List[langchain.schema.Document][source]#
Load documents.
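A minimal GoogleDriveLoader sketch, assuming OAuth credentials are already in place at the default credentials_path; the folder ID is an illustrative placeholder:
from langchain.document_loaders import GoogleDriveLoader

loader = GoogleDriveLoader(
    folder_id="your-drive-folder-id",  # illustrative placeholder
    recursive=False,                   # do not descend into subfolders
)
docs = loader.load()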
class langchain.document_loaders.GutenbergLoader(file_path: str)[source]#
Loader that uses urllib to load .txt web files.
load() → List[langchain.schema.Document][source]#
Load file.
class langchain.document_loaders.HNLoader(web_path: Union[str, List[str]], header_template: Optional[dict] = None)[source]#
Load Hacker News data from either main page results or the comments page.
load() → List[langchain.schema.Document][source]#
Get important HN webpage information. Components are:
title
content
source url
time of post
author of the post
number of comments
rank of the post
load_comments(soup_info: Any) → List[langchain.schema.Document][source]#
Load comments from an HN post.
load_results(soup: Any) → List[langchain.schema.Document][source]#
Load items from an HN page.
web_paths: List[str]#
class langchain.document_loaders.HuggingFaceDatasetLoader(path: str, page_content_column: str = 'text', name: Optional[str] = None, data_dir: Optional[str] = None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None, cache_dir: Optional[str] = None, keep_in_memory: Optional[bool] = None, save_infos: bool = False, use_auth_token: Optional[Union[bool, str]] = None, num_proc: Optional[int] = None)[source]#
Loading logic for loading documents from the Hugging Face Hub.
load() → List[langchain.schema.Document][source]#
Load documents.
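A short HuggingFaceDatasetLoader sketch; it assumes the public "imdb" dataset on the Hugging Face Hub, whose "text" column becomes the page_content:
from langchain.document_loaders import HuggingFaceDatasetLoader

loader = HuggingFaceDatasetLoader(
    path="imdb",                 # dataset name on the Hub (assumed available)
    page_content_column="text",  # column written into page_content
)
docs = loader.load()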
class langchain.document_loaders.IFixitLoader(web_path: str)[source]#
Load iFixit repair guides, device wikis and answers.
iFixit is the largest, open repair community on the web. The site contains nearly 100k repair manuals, 200k Questions & Answers on 42k devices, and all the data is licensed under CC-BY.
This loader will allow you to download the text of a repair guide, text of Q&As and wikis from devices on iFixit using their open APIs and web scraping.
load() → List[langchain.schema.Document][source]#
Load data into document objects.
load_device(url_override: Optional[str] = None, include_guides: bool = True) → List[langchain.schema.Document][source]#
load_guide(url_override: Optional[str] = None) → List[langchain.schema.Document][source]#
load_questions_and_answers(url_override: Optional[str] = None) → List[langchain.schema.Document][source]#
static load_suggestions(query: str = '', doc_type: str = 'all') → List[langchain.schema.Document][source]#
class langchain.document_loaders.IMSDbLoader(web_path: Union[str, List[str]], header_template: Optional[dict] = None)[source]#
Loader that loads IMSDb webpages.
load() → List[langchain.schema.Document][source]#
Load webpage.
web_paths: List[str]#
class langchain.document_loaders.ImageCaptionLoader(path_images: Union[str, List[str]], blip_processor: str = 'Salesforce/blip-image-captioning-base', blip_model: str = 'Salesforce/blip-image-captioning-base')[source]#
Loader that loads the captions of an image.
load() → List[langchain.schema.Document][source]#
Load from a list of image files.
class langchain.document_loaders.MathpixPDFLoader(file_path: str, processed_file_format: str = 'mmd', max_wait_time_seconds: int = 500, should_clean_pdf: bool = False, **kwargs: Any)[source]#
clean_pdf(contents: str) → str[source]#
property data: dict#
get_processed_pdf(pdf_id: str) → str[source]#
property headers: dict#
load() → List[langchain.schema.Document][source]#
Load data into document objects.
send_pdf() → str[source]#
property url: str#
wait_for_processing(pdf_id: str) → None[source]#
class langchain.document_loaders.ModernTreasuryLoader(resource: str, organization_id: Optional[str] = None, api_key: Optional[str] = None)[source]#
load() → List[langchain.schema.Document][source]#
Load data into document objects.
class langchain.document_loaders.NotebookLoader(path: str, include_outputs: bool = False, max_output_length: int = 10, remove_newline: bool = False, traceback: bool = False)[source]#
Loader that loads .ipynb notebook files.
load() → List[langchain.schema.Document][source]#
Load documents.
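A NotebookLoader sketch; the notebook path is illustrative:
from langchain.document_loaders import NotebookLoader

loader = NotebookLoader(
    "analysis.ipynb",      # illustrative path
    include_outputs=True,  # keep cell outputs in the document text
    max_output_length=20,  # truncate long outputs
    remove_newline=True,
)
docs = loader.load()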
class langchain.document_loaders.NotionDBLoader(integration_token: str, database_id: str)[source]#
Notion DB Loader.
Reads content from pages within a Notion Database.
Parameters
integration_token (str) – Notion integration token.
database_id (str) – Notion database id.
load() → List[langchain.schema.Document][source]#
Load documents from the Notion database.
Returns
List of documents.
Return type
List[Document]
load_page(page_id: str) → langchain.schema.Document[source]#
Read a page.
class langchain.document_loaders.NotionDirectoryLoader(path: str)[source]#
Loader that loads Notion directory dump.
load() → List[langchain.schema.Document][source]#
Load documents.
class langchain.document_loaders.ObsidianLoader(path: str, encoding: str = 'UTF-8', collect_metadata: bool = True)[source]#
Loader that loads Obsidian files from disk.
FRONT_MATTER_REGEX = re.compile('^---\\n(.*?)\\n---\\n', re.MULTILINE|re.DOTALL)#
load() → List[langchain.schema.Document][source]#
Load documents.
class langchain.document_loaders.OnlinePDFLoader(file_path: str)[source]#
Loader that loads online PDFs.
load() → List[langchain.schema.Document][source]#
Load documents.
class langchain.document_loaders.OutlookMessageLoader(file_path: str)[source]#
Loader that loads Outlook Message files using extract_msg (TeamMsgExtractor/msg-extractor).
load() → List[langchain.schema.Document][source]#
Load data into document objects.
class langchain.document_loaders.PDFMinerLoader(file_path: str)[source]#
Loader that uses PDFMiner to load PDF files.
load() → List[langchain.schema.Document][source]#
Load file.
class langchain.document_loaders.PDFMinerPDFasHTMLLoader(file_path: str)[source]#
Loader that uses PDFMiner to load PDF files as HTML content.
load() → List[langchain.schema.Document][source]#
Load file.
langchain.document_loaders.PagedPDFSplitter#
alias of langchain.document_loaders.pdf.PyPDFLoader
class langchain.document_loaders.PlaywrightURLLoader(urls: List[str], continue_on_failure: bool = True, headless: bool = True, remove_selectors: Optional[List[str]] = None)[source]#
Loader that uses Playwright to load a page and unstructured to parse the HTML.
This is useful for loading pages that require JavaScript to render.
urls#
List of URLs to load.
Type
List[str]
continue_on_failure#
If True, continue loading other URLs on failure.
Type
bool
headless#
If True, the browser will run in headless mode.
Type
bool
load() → List[langchain.schema.Document][source]#
Load the specified URLs using Playwright and create Document instances.
Returns
A list of Document instances with loaded content.
Return type
List[Document]
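A PlaywrightURLLoader sketch; the URL is illustrative, and remove_selectors strips page chrome before text extraction:
from langchain.document_loaders import PlaywrightURLLoader

loader = PlaywrightURLLoader(
    urls=["https://example.com"],           # illustrative URL
    remove_selectors=["header", "footer"],  # drop boilerplate elements
)
docs = loader.load()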
class langchain.document_loaders.PyMuPDFLoader(file_path: str)[source]#
Loader that uses PyMuPDF to load PDF files.
load(**kwargs: Optional[Any]) → List[langchain.schema.Document][source]#
Load file.
class langchain.document_loaders.PyPDFDirectoryLoader(path: str, glob: str = '**/[!.]*.pdf', silent_errors: bool = False, load_hidden: bool = False, recursive: bool = False)[source]#
Loads a directory with PDF files with pypdf and chunks at character level.
Loader also stores page numbers in metadata.
load() → List[langchain.schema.Document][source]#
Load data into document objects.
class langchain.document_loaders.PyPDFLoader(file_path: str)[source]#
Loads a PDF with pypdf and chunks at character level.
Loader also stores page numbers in metadata.
load() → List[langchain.schema.Document][source]#
Load given path as pages.
class langchain.document_loaders.PythonLoader(file_path: str)[source]#
Load Python files, respecting any non-default encoding if specified.
class langchain.document_loaders.ReadTheDocsLoader(path: str, encoding: Optional[str] = None, errors: Optional[str] = None, **kwargs: Optional[Any])[source]#
Loader that loads ReadTheDocs documentation directory dump.
load() → List[langchain.schema.Document][source]#
Load documents.
class langchain.document_loaders.RedditPostsLoader(client_id: str, client_secret: str, user_agent: str, search_queries: Sequence[str], mode: str, categories: Sequence[str] = ['new'], number_posts: Optional[int] = 10)[source]#
Reddit posts loader. Read posts from a subreddit.
First you need to go to https://www.reddit.com/prefs/apps/ and create your application.
load() → List[langchain.schema.Document][source]#
Load reddits.
class langchain.document_loaders.RoamLoader(path: str)[source]#
Loader that loads Roam files from disk.
load() → List[langchain.schema.Document][source]#
Load documents.
class langchain.document_loaders.S3DirectoryLoader(bucket: str, prefix: str = '')[source]#
Loading logic for loading documents from s3.
load() → List[langchain.schema.Document][source]#
Load documents.
class langchain.document_loaders.S3FileLoader(bucket: str, key: str)[source]#
Loading logic for loading documents from s3.
load() → List[langchain.schema.Document][source]#
Load documents.
class langchain.document_loaders.SRTLoader(file_path: str)[source]#
Loader for .srt (subtitle) files.
load() → List[langchain.schema.Document][source]#
Load using pysrt file.
class langchain.document_loaders.SeleniumURLLoader(urls: List[str], continue_on_failure: bool = True, browser: Literal['chrome', 'firefox'] = 'chrome', executable_path: Optional[str] = None, headless: bool = True)[source]#
Loader that uses Selenium to load a page and unstructured to parse the HTML.
This is useful for loading pages that require JavaScript to render.
urls#
List of URLs to load.
Type
List[str]
continue_on_failure#
If True, continue loading other URLs on failure.
Type
bool
browser#
The browser to use, either 'chrome' or 'firefox'.
Type
str
executable_path#
The path to the browser executable.
Type
Optional[str]
headless#
If True, the browser will run in headless mode.
Type
bool
load() → List[langchain.schema.Document][source]#
Load the specified URLs using Selenium and create Document instances.
Returns
A list of Document instances with loaded content.
Return type
List[Document]
class langchain.document_loaders.SitemapLoader(web_path: str, filter_urls: Optional[List[str]] = None, parsing_function: Optional[Callable] = None, blocksize: Optional[int] = None, blocknum: int = 0)[source]#
Loader that fetches a sitemap and loads those URLs.
load() → List[langchain.schema.Document][source]#
Load sitemap.
parse_sitemap(soup: Any) → List[dict][source]#
Parse sitemap xml and load into a list of dicts.
web_paths: List[str]#
class langchain.document_loaders.SlackDirectoryLoader(zip_path: str, workspace_url: Optional[str] = None)[source]#
Loader for loading documents from a Slack directory dump.
load() → List[langchain.schema.Document][source]#
Load and return documents from the Slack directory dump.
class langchain.document_loaders.SpreedlyLoader(access_token: str, resource: str)[source]#
load() → List[langchain.schema.Document][source]#
Load data into document objects.
class langchain.document_loaders.StripeLoader(resource: str, access_token: Optional[str] = None)[source]#
load() → List[langchain.schema.Document][source]#
Load data into document objects.
class langchain.document_loaders.TelegramChatLoader(path: str)[source]#
Loader that loads Telegram chat json directory dump.
load() → List[langchain.schema.Document][source]#
Load documents.
class langchain.document_loaders.TextLoader(file_path: str, encoding: Optional[str] = None)[source]#
Load text files.
load() → List[langchain.schema.Document][source]#
Load from file path.
class langchain.document_loaders.TwitterTweetLoader(auth_handler: Union[OAuthHandler, OAuth2BearerHandler], twitter_users: Sequence[str], number_tweets: Optional[int] = 100)[source]#
Twitter tweets loader. Read tweets of a user's twitter handle.
First you need to go to https://developer.twitter.com/en/docs/twitter-api/getting-started/getting-access-to-the-twitter-api to get your token, and create a v2 version of the app.
classmethod from_bearer_token(oauth2_bearer_token: str, twitter_users: Sequence[str], number_tweets: Optional[int] = 100) → langchain.document_loaders.twitter.TwitterTweetLoader[source]#
Create a TwitterTweetLoader from an OAuth2 bearer token.
classmethod from_secrets(access_token: str, access_token_secret: str, consumer_key: str, consumer_secret: str, twitter_users: Sequence[str], number_tweets: Optional[int] = 100) → langchain.document_loaders.twitter.TwitterTweetLoader[source]#
Create a TwitterTweetLoader from access tokens and secrets.
load() → List[langchain.schema.Document][source]#
Load tweets.
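A sketch of the from_bearer_token constructor documented above; the token and handle are illustrative placeholders:
from langchain.document_loaders import TwitterTweetLoader

loader = TwitterTweetLoader.from_bearer_token(
    oauth2_bearer_token="YOUR_BEARER_TOKEN",  # illustrative placeholder
    twitter_users=["hwchase17"],              # illustrative handle
    number_tweets=50,
)
docs = loader.load()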
class langchain.document_loaders.UnstructuredAPIFileIOLoader(file: IO, mode: str = 'single', url: str = 'https://api.unstructured.io/general/v0/general', api_key: str = '', **unstructured_kwargs: Any)[source]#
Loader that uses the unstructured web API to load file IO objects.
class langchain.document_loaders.UnstructuredAPIFileLoader(file_path: str, mode: str = 'single', url: str = 'https://api.unstructured.io/general/v0/general', api_key: str = '', **unstructured_kwargs: Any)[source]#
Loader that uses the unstructured web API to load files.
class langchain.document_loaders.UnstructuredEPubLoader(file_path: str, mode: str = 'single', **unstructured_kwargs: Any)[source]#
Loader that uses unstructured to load EPub files.
class langchain.document_loaders.UnstructuredEmailLoader(file_path: str, mode: str = 'single', **unstructured_kwargs: Any)[source]#
Loader that uses unstructured to load email files.
class langchain.document_loaders.UnstructuredFileIOLoader(file: IO, mode: str = 'single', **unstructured_kwargs: Any)[source]#
Loader that uses unstructured to load file IO objects.
class langchain.document_loaders.UnstructuredFileLoader(file_path: str, mode: str = 'single', **unstructured_kwargs: Any)[source]#
Loader that uses unstructured to load files.
class langchain.document_loaders.UnstructuredHTMLLoader(file_path: str, mode: str = 'single', **unstructured_kwargs: Any)[source]#
Loader that uses unstructured to load HTML files.
class langchain.document_loaders.UnstructuredImageLoader(file_path: str, mode: str = 'single', **unstructured_kwargs: Any)[source]#
Loader that uses unstructured to load image files, such as PNGs and JPGs.
class langchain.document_loaders.UnstructuredMarkdownLoader(file_path: str, mode: str = 'single', **unstructured_kwargs: Any)[source]#
Loader that uses unstructured to load Markdown files.
class langchain.document_loaders.UnstructuredPDFLoader(file_path: str, mode: str = 'single', **unstructured_kwargs: Any)[source]#
Loader that uses unstructured to load PDF files.
class langchain.document_loaders.UnstructuredPowerPointLoader(file_path: str, mode: str = 'single', **unstructured_kwargs: Any)[source]#
Loader that uses unstructured to load PowerPoint files.
class langchain.document_loaders.UnstructuredRTFLoader(file_path: str, mode: str = 'single', **unstructured_kwargs: Any)[source]#
Loader that uses unstructured to load RTF files.
class langchain.document_loaders.UnstructuredURLLoader(urls: List[str], continue_on_failure: bool = True, mode: str = 'single', **unstructured_kwargs: Any)[source]#
Loader that uses unstructured to load HTML content from URLs.
load() → List[langchain.schema.Document][source]#
Load file.
class langchain.document_loaders.UnstructuredWordDocumentLoader(file_path: str, mode: str = 'single', **unstructured_kwargs: Any)[source]#
Loader that uses unstructured to load Word documents.
class langchain.document_loaders.WebBaseLoader(web_path: Union[str, List[str]], header_template: Optional[dict] = None)[source]#
Loader that uses urllib and Beautiful Soup to load webpages.
aload() → List[langchain.schema.Document][source]#
Load text from the urls in web_path asynchronously into Documents.
default_parser: str = 'html.parser'#
Default parser to use for BeautifulSoup.
async fetch_all(urls: List[str]) → Any[source]#
Fetch all urls concurrently with rate limiting.
load() → List[langchain.schema.Document][source]#
Load text from the url(s) in web_path.
requests_per_second: int = 2#
Max number of concurrent requests to make.
scrape(parser: Optional[str] = None) → Any[source]#
Scrape data from webpage and return it in BeautifulSoup format.
scrape_all(urls: List[str], parser: Optional[str] = None) → List[Any][source]#
Fetch all urls, then return soups for all results.
property web_path: str#
web_paths: List[str]#
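A WebBaseLoader sketch; the URLs are illustrative, and requests_per_second throttles the concurrent fetching done by aload():
from langchain.document_loaders import WebBaseLoader

loader = WebBaseLoader(["https://example.com", "https://example.org"])  # illustrative URLs
loader.requests_per_second = 1  # throttle concurrent requests
docs = loader.aload()           # fetch all pages concurrently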
class langchain.document_loaders.WhatsAppChatLoader(path: str)[source]#
Loader that loads a WhatsApp messages text file.
load() → List[langchain.schema.Document][source]#
Load documents.
class langchain.document_loaders.YoutubeLoader(video_id: str, add_video_info: bool = False, language: str = 'en', continue_on_failure: bool = False)[source]#
Loader that loads Youtube transcripts.
classmethod from_youtube_url(youtube_url: str, **kwargs: Any) → langchain.document_loaders.youtube.YoutubeLoader[source]#
Given a YouTube URL, load the video's transcript.
load() → List[langchain.schema.Document][source]#
Load documents.
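A closing YoutubeLoader sketch; the video URL is an illustrative placeholder:
from langchain.document_loaders import YoutubeLoader

loader = YoutubeLoader.from_youtube_url(
    "https://www.youtube.com/watch?v=VIDEO_ID",  # illustrative URL
    add_video_info=True,  # also attach video metadata to the documents
)
docs = loader.load()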