return llm.client.create(**kwargs) return _completion_with_retry(**kwargs) async def acompletion_with_retry( llm: Union[BaseOpenAI, OpenAIChat], **kwargs: Any ) -> Any: """Use tenacity to retry the async completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator async def _completion_with_retry(**kwargs: Any) -> Any: # Use OpenAI's async api https://github.com/openai/openai-python#async-api return await llm.client.acreate(**kwargs) return await _completion_with_retry(**kwargs) class BaseOpenAI(BaseLLM): """Wrapper around OpenAI large language models.""" @property def lc_secrets(self) -> Dict[str, str]: return {"openai_api_key": "OPENAI_API_KEY"} @property def lc_serializable(self) -> bool: return True client: Any #: :meta private: model_name: str = Field("text-davinci-003", alias="model") """Model name to use.""" temperature: float = 0.7 """What sampling temperature to use.""" max_tokens: int = 256 """The maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the models maximal context size.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" frequency_penalty: float = 0 """Penalizes repeated tokens according to frequency.""" presence_penalty: float = 0 """Penalizes repeated tokens.""" n: int = 1 """How many completions to generate for each prompt."""
"""How many completions to generate for each prompt.""" best_of: int = 1 """Generates best_of completions server-side and returns the "best".""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" openai_api_key: Optional[str] = None openai_api_base: Optional[str] = None openai_organization: Optional[str] = None # to support explicit proxy for OpenAI openai_proxy: Optional[str] = None batch_size: int = 20 """Batch size to use when passing multiple documents to generate.""" request_timeout: Optional[Union[float, Tuple[float, float]]] = None """Timeout for requests to OpenAI completion API. Default is 600 seconds.""" logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) """Adjust the probability of specific tokens being generated.""" max_retries: int = 6 """Maximum number of retries to make when generating.""" streaming: bool = False """Whether to stream the results or not.""" allowed_special: Union[Literal["all"], AbstractSet[str]] = set() """Set of special tokens that are allowed。""" disallowed_special: Union[Literal["all"], Collection[str]] = "all" """Set of special tokens that are not allowed。""" tiktoken_model_name: Optional[str] = None """The model name to pass to tiktoken when using this class. Tiktoken is used to count the number of tokens in documents to constrain them to be under a certain limit. By default, when set to None, this will
be the same as the embedding model name. However, there are some cases where you may want to use this Embedding class with a model name not supported by tiktoken. This can include when using Azure embeddings or when using one of the many model providers that expose an OpenAI-like API but with different models. In those cases, in order to avoid erroring when tiktoken is called, you can specify a model name to use here.""" def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]: # type: ignore """Initialize the OpenAI object.""" model_name = data.get("model_name", "") if model_name.startswith("gpt-3.5-turbo") or model_name.startswith("gpt-4"): warnings.warn( "You are trying to use a chat model. This way of initializing it is " "no longer supported. Instead, please use: " "`from langchain.chat_models import ChatOpenAI`" ) return OpenAIChat(**data) return super().__new__(cls) class Config: """Configuration for this pydantic object.""" allow_population_by_field_name = True @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = cls.all_required_field_names() extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") if field_name not in all_required_field_names: logger.warning(
if field_name not in all_required_field_names: logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) if invalid_model_kwargs: raise ValueError( f"Parameters {invalid_model_kwargs} should be specified explicitly. " f"Instead they were passed in as part of `model_kwargs` parameter." ) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["openai_api_key"] = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) values["openai_api_base"] = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE", default="", ) values["openai_proxy"] = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) values["openai_organization"] = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) try: import openai values["client"] = openai.Completion except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`."
"Please install it with `pip install openai`." ) if values["streaming"] and values["n"] > 1: raise ValueError("Cannot stream results when n > 1.") if values["streaming"] and values["best_of"] > 1: raise ValueError("Cannot stream results when best_of > 1.") return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" normal_params = { "temperature": self.temperature, "max_tokens": self.max_tokens, "top_p": self.top_p, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "n": self.n, "request_timeout": self.request_timeout, "logit_bias": self.logit_bias, } # Azure gpt-35-turbo doesn't support best_of # don't specify best_of if it is 1 if self.best_of > 1: normal_params["best_of"] = self.best_of return {**normal_params, **self.model_kwargs} def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Call out to OpenAI's endpoint with k unique prompts. Args: prompts: The prompts to pass into the model. stop: Optional list of stop words to use when generating. Returns: The full LLM output. Example: .. code-block:: python
The full LLM output. Example: .. code-block:: python response = openai.generate(["Tell me a joke."]) """ # TODO: write a unit test for this params = self._invocation_params params = {**params, **kwargs} sub_prompts = self.get_sub_prompts(params, prompts, stop) choices = [] token_usage: Dict[str, int] = {} # Get the token usage from the response. # Includes prompt, completion, and total tokens used. _keys = {"completion_tokens", "prompt_tokens", "total_tokens"} for _prompts in sub_prompts: if self.streaming: if len(_prompts) > 1: raise ValueError("Cannot stream results with multiple prompts.") params["stream"] = True response = _streaming_response_template() for stream_resp in completion_with_retry( self, prompt=_prompts, **params ): if run_manager: run_manager.on_llm_new_token( stream_resp["choices"][0]["text"], verbose=self.verbose, logprobs=stream_resp["choices"][0]["logprobs"], ) _update_response(response, stream_resp) choices.extend(response["choices"]) else: response = completion_with_retry(self, prompt=_prompts, **params) choices.extend(response["choices"]) if not self.streaming: # Can't update token usage if streaming update_token_usage(_keys, response, token_usage) return self.create_llm_result(choices, prompts, token_usage) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None,
prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: """Call out to OpenAI's endpoint async with k unique prompts.""" params = self._invocation_params params = {**params, **kwargs} sub_prompts = self.get_sub_prompts(params, prompts, stop) choices = [] token_usage: Dict[str, int] = {} # Get the token usage from the response. # Includes prompt, completion, and total tokens used. _keys = {"completion_tokens", "prompt_tokens", "total_tokens"} for _prompts in sub_prompts: if self.streaming: if len(_prompts) > 1: raise ValueError("Cannot stream results with multiple prompts.") params["stream"] = True response = _streaming_response_template() async for stream_resp in await acompletion_with_retry( self, prompt=_prompts, **params ): if run_manager: await run_manager.on_llm_new_token( stream_resp["choices"][0]["text"], verbose=self.verbose, logprobs=stream_resp["choices"][0]["logprobs"], ) _update_response(response, stream_resp) choices.extend(response["choices"]) else: response = await acompletion_with_retry(self, prompt=_prompts, **params) choices.extend(response["choices"]) if not self.streaming: # Can't update token usage if streaming update_token_usage(_keys, response, token_usage) return self.create_llm_result(choices, prompts, token_usage)
return self.create_llm_result(choices, prompts, token_usage) def get_sub_prompts( self, params: Dict[str, Any], prompts: List[str], stop: Optional[List[str]] = None, ) -> List[List[str]]: """Get the sub prompts for llm call.""" if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop if params["max_tokens"] == -1: if len(prompts) != 1: raise ValueError( "max_tokens set to -1 not supported for multiple inputs." ) params["max_tokens"] = self.max_tokens_for_prompt(prompts[0]) sub_prompts = [ prompts[i : i + self.batch_size] for i in range(0, len(prompts), self.batch_size) ] return sub_prompts def create_llm_result( self, choices: Any, prompts: List[str], token_usage: Dict[str, int] ) -> LLMResult: """Create the LLMResult from the choices and prompts.""" generations = [] for i, _ in enumerate(prompts): sub_choices = choices[i * self.n : (i + 1) * self.n] generations.append( [ Generation( text=choice["text"], generation_info=dict( finish_reason=choice.get("finish_reason"), logprobs=choice.get("logprobs"), ), ) for choice in sub_choices ] )
), ) for choice in sub_choices ] ) llm_output = {"token_usage": token_usage, "model_name": self.model_name} return LLMResult(generations=generations, llm_output=llm_output) def stream(self, prompt: str, stop: Optional[List[str]] = None) -> Generator: """Call OpenAI with streaming flag and return the resulting generator. BETA: this is a beta feature while we figure out the right abstraction. Once that happens, this interface could change. Args: prompt: The prompts to pass into the model. stop: Optional list of stop words to use when generating. Returns: A generator representing the stream of tokens from OpenAI. Example: .. code-block:: python generator = openai.stream("Tell me a joke.") for token in generator: yield token """ params = self.prep_streaming_params(stop) generator = self.client.create(prompt=prompt, **params) return generator def prep_streaming_params(self, stop: Optional[List[str]] = None) -> Dict[str, Any]: """Prepare the params for streaming.""" params = self._invocation_params if "best_of" in params and params["best_of"] != 1: raise ValueError("OpenAI only supports best_of == 1 for streaming") if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop params["stream"] = True return params @property def _invocation_params(self) -> Dict[str, Any]:
@property def _invocation_params(self) -> Dict[str, Any]: """Get the parameters used to invoke the model.""" openai_creds: Dict[str, Any] = { "api_key": self.openai_api_key, "api_base": self.openai_api_base, "organization": self.openai_organization, } if self.openai_proxy: import openai openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501 return {**openai_creds, **self._default_params} @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "openai" def get_token_ids(self, text: str) -> List[int]: """Get the token IDs using the tiktoken package.""" # tiktoken NOT supported for Python < 3.8 if sys.version_info[1] < 8: return super().get_num_tokens(text) try: import tiktoken except ImportError: raise ImportError( "Could not import tiktoken python package. " "This is needed in order to calculate get_num_tokens. " "Please install it with `pip install tiktoken`." ) model_name = self.tiktoken_model_name or self.model_name try: enc = tiktoken.encoding_for_model(model_name) except KeyError:
enc = tiktoken.encoding_for_model(model_name) except KeyError: logger.warning("Warning: model not found. Using cl100k_base encoding.") model = "cl100k_base" enc = tiktoken.get_encoding(model) return enc.encode( text, allowed_special=self.allowed_special, disallowed_special=self.disallowed_special, ) @staticmethod def modelname_to_contextsize(modelname: str) -> int: """Calculate the maximum number of tokens possible to generate for a model. Args: modelname: The modelname we want to know the context size for. Returns: The maximum context size Example: .. code-block:: python max_tokens = openai.modelname_to_contextsize("text-davinci-003") """ model_token_mapping = { "gpt-4": 8192, "gpt-4-0314": 8192, "gpt-4-0613": 8192, "gpt-4-32k": 32768, "gpt-4-32k-0314": 32768, "gpt-4-32k-0613": 32768, "gpt-3.5-turbo": 4096, "gpt-3.5-turbo-0301": 4096, "gpt-3.5-turbo-0613": 4096, "gpt-3.5-turbo-16k": 16385, "gpt-3.5-turbo-16k-0613": 16385, "text-ada-001": 2049,
"text-ada-001": 2049, "ada": 2049, "text-babbage-001": 2040, "babbage": 2049, "text-curie-001": 2049, "curie": 2049, "davinci": 2049, "text-davinci-003": 4097, "text-davinci-002": 4097, "code-davinci-002": 8001, "code-davinci-001": 8001, "code-cushman-002": 2048, "code-cushman-001": 2048, } # handling finetuned models if "ft-" in modelname: modelname = modelname.split(":")[0] context_size = model_token_mapping.get(modelname, None) if context_size is None: raise ValueError( f"Unknown model: {modelname}. Please provide a valid OpenAI model name." "Known models are: " + ", ".join(model_token_mapping.keys()) ) return context_size @property def max_context_size(self) -> int: """Get max context size for this model.""" return self.modelname_to_contextsize(self.model_name) def max_tokens_for_prompt(self, prompt: str) -> int: """Calculate the maximum number of tokens possible to generate for a prompt. Args: prompt: The prompt to pass into the model. Returns: The maximum number of tokens to generate for a prompt. Example: .. code-block:: python max_tokens = openai.max_token_for_prompt("Tell me a joke.")
max_tokens = openai.max_token_for_prompt("Tell me a joke.") """ num_tokens = self.get_num_tokens(prompt) return self.max_context_size - num_tokens [docs]class OpenAI(BaseOpenAI): """Wrapper around OpenAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import OpenAI openai = OpenAI(model_name="text-davinci-003") """ @property def _invocation_params(self) -> Dict[str, Any]: return {**{"model": self.model_name}, **super()._invocation_params} [docs]class AzureOpenAI(BaseOpenAI): """Wrapper around Azure-specific OpenAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import AzureOpenAI openai = AzureOpenAI(model_name="text-davinci-003") """ deployment_name: str = "" """Deployment name to use.""" openai_api_type: str = "azure" openai_api_version: str = "" @root_validator()
openai_api_version: str = "" @root_validator() def validate_azure_settings(cls, values: Dict) -> Dict: values["openai_api_version"] = get_from_dict_or_env( values, "openai_api_version", "OPENAI_API_VERSION", ) values["openai_api_type"] = get_from_dict_or_env( values, "openai_api_type", "OPENAI_API_TYPE", ) return values @property def _identifying_params(self) -> Mapping[str, Any]: return { **{"deployment_name": self.deployment_name}, **super()._identifying_params, } @property def _invocation_params(self) -> Dict[str, Any]: openai_params = { "engine": self.deployment_name, "api_type": self.openai_api_type, "api_version": self.openai_api_version, } return {**openai_params, **super()._invocation_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "azure" [docs]class OpenAIChat(BaseLLM): """Wrapper around OpenAI Chat large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import OpenAIChat
.. code-block:: python from langchain.llms import OpenAIChat openaichat = OpenAIChat(model_name="gpt-3.5-turbo") """ client: Any #: :meta private: model_name: str = "gpt-3.5-turbo" """Model name to use.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" openai_api_key: Optional[str] = None openai_api_base: Optional[str] = None # to support explicit proxy for OpenAI openai_proxy: Optional[str] = None max_retries: int = 6 """Maximum number of retries to make when generating.""" prefix_messages: List = Field(default_factory=list) """Series of messages for Chat input.""" streaming: bool = False """Whether to stream the results or not.""" allowed_special: Union[Literal["all"], AbstractSet[str]] = set() """Set of special tokens that are allowed。""" disallowed_special: Union[Literal["all"], Collection[str]] = "all" """Set of special tokens that are not allowed。""" @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.")
raise ValueError(f"Found {field_name} supplied twice.") extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) openai_api_base = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE", default="", ) openai_proxy = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) openai_organization = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="" ) try: import openai openai.api_key = openai_api_key if openai_api_base: openai.api_base = openai_api_base if openai_organization: openai.organization = openai_organization if openai_proxy: openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501 except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) try: values["client"] = openai.ChatCompletion except AttributeError: raise ValueError( "`openai` has no `ChatCompletion` attribute, this is likely "
"`openai` has no `ChatCompletion` attribute, this is likely " "due to an old version of the openai package. Try upgrading it " "with `pip install --upgrade openai`." ) warnings.warn( "You are trying to use a chat model. This way of initializing it is " "no longer supported. Instead, please use: " "`from langchain.chat_models import ChatOpenAI`" ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" return self.model_kwargs def _get_chat_params( self, prompts: List[str], stop: Optional[List[str]] = None ) -> Tuple: if len(prompts) > 1: raise ValueError( f"OpenAIChat currently only supports single prompt, got {prompts}" ) messages = self.prefix_messages + [{"role": "user", "content": prompts[0]}] params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params} if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop if params.get("max_tokens") == -1: # for ChatGPT api, omitting max_tokens is equivalent to having no limit del params["max_tokens"] return messages, params def _generate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: messages, params = self._get_chat_params(prompts, stop) params = {**params, **kwargs} if self.streaming: response = "" params["stream"] = True for stream_resp in completion_with_retry(self, messages=messages, **params): token = stream_resp["choices"][0]["delta"].get("content", "") response += token if run_manager: run_manager.on_llm_new_token( token, ) return LLMResult( generations=[[Generation(text=response)]], ) else: full_response = completion_with_retry(self, messages=messages, **params) llm_output = { "token_usage": full_response["usage"], "model_name": self.model_name, } return LLMResult( generations=[ [Generation(text=full_response["choices"][0]["message"]["content"])] ], llm_output=llm_output, ) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any, ) -> LLMResult: messages, params = self._get_chat_params(prompts, stop) params = {**params, **kwargs} if self.streaming: response = "" params["stream"] = True async for stream_resp in await acompletion_with_retry( self, messages=messages, **params ):
self, messages=messages, **params ): token = stream_resp["choices"][0]["delta"].get("content", "") response += token if run_manager: await run_manager.on_llm_new_token( token, ) return LLMResult( generations=[[Generation(text=response)]], ) else: full_response = await acompletion_with_retry( self, messages=messages, **params ) llm_output = { "token_usage": full_response["usage"], "model_name": self.model_name, } return LLMResult( generations=[ [Generation(text=full_response["choices"][0]["message"]["content"])] ], llm_output=llm_output, ) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "openai-chat" [docs] def get_token_ids(self, text: str) -> List[int]: """Get the token IDs using the tiktoken package.""" # tiktoken NOT supported for Python < 3.8 if sys.version_info[1] < 8: return super().get_token_ids(text) try: import tiktoken except ImportError: raise ImportError( "Could not import tiktoken python package. " "This is needed in order to calculate get_num_tokens. " "Please install it with `pip install tiktoken`." )
"Please install it with `pip install tiktoken`." ) enc = tiktoken.encoding_for_model(self.model_name) return enc.encode( text, allowed_special=self.allowed_special, disallowed_special=self.disallowed_special, )
https://api.python.langchain.com/en/stable/_modules/langchain/llms/openai.html
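A minimal usage sketch for the completion wrapper above (assumptions: `openai` and `langchain` are installed, `OPENAI_API_KEY` is set in the environment, and the prompts are illustrative):

from langchain.llms import OpenAI

# Parameters not declared on the class (e.g. a default stop sequence) would be
# routed into `model_kwargs` by the `build_extra` root validator.
llm = OpenAI(model_name="text-davinci-003", temperature=0.7, max_tokens=256)

# `generate` splits the prompts into batches of `batch_size` and returns an
# LLMResult whose `llm_output` carries the aggregated token usage.
result = llm.generate(["Tell me a joke.", "Tell me a limerick."])
print(result.llm_output["token_usage"])

# Context-window arithmetic used by `max_tokens_for_prompt`:
# remaining = modelname_to_contextsize(model_name) - get_num_tokens(prompt)
print(llm.max_tokens_for_prompt("Tell me a joke."))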
Source code for langchain.llms.self_hosted """Run model inference on self-hosted remote hardware.""" import importlib.util import logging import pickle from typing import Any, Callable, List, Mapping, Optional from pydantic import Extra from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens logger = logging.getLogger(__name__) def _generate_text( pipeline: Any, prompt: str, *args: Any, stop: Optional[List[str]] = None, **kwargs: Any, ) -> str: """Inference function to send to the remote hardware. Accepts a pipeline callable (or, more likely, a key pointing to the model on the cluster's object store) and returns text predictions for each document in the batch. """ text = pipeline(prompt, *args, **kwargs) if stop is not None: text = enforce_stop_tokens(text, stop) return text def _send_pipeline_to_device(pipeline: Any, device: int) -> Any: """Send a pipeline to a device on the cluster.""" if isinstance(pipeline, str): with open(pipeline, "rb") as f: pipeline = pickle.load(f) if importlib.util.find_spec("torch") is not None: import torch cuda_device_count = torch.cuda.device_count() if device < -1 or (device >= cuda_device_count): raise ValueError( f"Got device=={device}, " f"device is required to be within [-1, {cuda_device_count})" ) if device < 0 and cuda_device_count > 0:
) if device < 0 and cuda_device_count > 0: logger.warning( "Device has %d GPUs available. " "Provide device={deviceId} to `from_model_id` to use available" "GPUs for execution. deviceId is -1 for CPU and " "can be a positive integer associated with CUDA device id.", cuda_device_count, ) pipeline.device = torch.device(device) pipeline.model = pipeline.model.to(pipeline.device) return pipeline [docs]class SelfHostedPipeline(LLM): """Run model inference on self-hosted remote hardware. Supported hardware includes auto-launched instances on AWS, GCP, Azure, and Lambda, as well as servers specified by IP address and SSH credentials (such as on-prem, or another cloud like Paperspace, Coreweave, etc.). To use, you should have the ``runhouse`` python package installed. Example for custom pipeline and inference functions: .. code-block:: python from langchain.llms import SelfHostedPipeline from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline import runhouse as rh def load_pipeline(): tokenizer = AutoTokenizer.from_pretrained("gpt2") model = AutoModelForCausalLM.from_pretrained("gpt2") return pipeline( "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10 ) def inference_fn(pipeline, prompt, stop = None): return pipeline(prompt)[0]["generated_text"] gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") llm = SelfHostedPipeline( model_load_fn=load_pipeline,
llm = SelfHostedPipeline( model_load_fn=load_pipeline, hardware=gpu, model_reqs=model_reqs, inference_fn=inference_fn ) Example for <2GB model (can be serialized and sent directly to the server): .. code-block:: python from langchain.llms import SelfHostedPipeline import runhouse as rh gpu = rh.cluster(name="rh-a10x", instance_type="A100:1") my_model = ... llm = SelfHostedPipeline.from_pipeline( pipeline=my_model, hardware=gpu, model_reqs=["./", "torch", "transformers"], ) Example passing model path for larger models: .. code-block:: python from langchain.llms import SelfHostedPipeline import runhouse as rh import pickle from transformers import pipeline generator = pipeline(model="gpt2") rh.blob(pickle.dumps(generator), path="models/pipeline.pkl" ).save().to(gpu, path="models") llm = SelfHostedPipeline.from_pipeline( pipeline="models/pipeline.pkl", hardware=gpu, model_reqs=["./", "torch", "transformers"], ) """ pipeline_ref: Any #: :meta private: client: Any #: :meta private: inference_fn: Callable = _generate_text #: :meta private: """Inference function to send to the remote hardware.""" hardware: Any """Remote hardware to send the inference function to.""" model_load_fn: Callable """Function to load the model remotely on the server.""" load_fn_kwargs: Optional[dict] = None
load_fn_kwargs: Optional[dict] = None """Key word arguments to pass to the model load function.""" model_reqs: List[str] = ["./", "torch"] """Requirements to install on hardware to inference the model.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid def __init__(self, **kwargs: Any): """Init the pipeline with an auxiliary function. The load function must be in global scope to be imported and run on the server, i.e. in a module and not a REPL or closure. Then, initialize the remote inference function. """ super().__init__(**kwargs) try: import runhouse as rh except ImportError: raise ImportError( "Could not import runhouse python package. " "Please install it with `pip install runhouse`." ) remote_load_fn = rh.function(fn=self.model_load_fn).to( self.hardware, reqs=self.model_reqs ) _load_fn_kwargs = self.load_fn_kwargs or {} self.pipeline_ref = remote_load_fn.remote(**_load_fn_kwargs) self.client = rh.function(fn=self.inference_fn).to( self.hardware, reqs=self.model_reqs ) [docs] @classmethod def from_pipeline( cls, pipeline: Any, hardware: Any, model_reqs: Optional[List[str]] = None, device: int = 0, **kwargs: Any, ) -> LLM: """Init the SelfHostedPipeline from a pipeline object or string.""" if not isinstance(pipeline, str): logger.warning(
if not isinstance(pipeline, str): logger.warning( "Serializing pipeline to send to remote hardware. " "Note, it can be quite slow" "to serialize and send large models with each execution. " "Consider sending the pipeline" "to the cluster and passing the path to the pipeline instead." ) load_fn_kwargs = {"pipeline": pipeline, "device": device} return cls( load_fn_kwargs=load_fn_kwargs, model_load_fn=_send_pipeline_to_device, hardware=hardware, model_reqs=["transformers", "torch"] + (model_reqs or []), **kwargs, ) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"hardware": self.hardware}, } @property def _llm_type(self) -> str: return "self_hosted_llm" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: return self.client( pipeline=self.pipeline_ref, prompt=prompt, stop=stop, **kwargs )
https://api.python.langchain.com/en/stable/_modules/langchain/llms/self_hosted.html
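A wiring sketch for `SelfHostedPipeline`, closely following the docstring example above (assumptions: `runhouse` is installed and configured with cloud credentials; the cluster name and instance type are placeholders):

import runhouse as rh
from transformers import pipeline
from langchain.llms import SelfHostedPipeline

def load_pipeline():
    # Runs on the remote machine, so it must live in an importable module,
    # not a REPL or closure.
    return pipeline("text-generation", model="gpt2", max_new_tokens=10)

gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")  # placeholder cluster
llm = SelfHostedPipeline(
    model_load_fn=load_pipeline,
    hardware=gpu,
    model_reqs=["./", "torch", "transformers"],
)
print(llm("Once upon a time"))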
Source code for langchain.document_loaders.git

import os
from typing import Callable, List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class GitLoader(BaseLoader):
    """Loads files from a Git repository into a list of documents.

    The repository can be local on disk (available at `repo_path`) or remote
    (at `clone_url`), in which case it is cloned to `repo_path`.
    Currently, only text files are supported.

    Each document represents one file in the repository. `repo_path` points to
    the local Git repository, and `branch` specifies the branch to load files
    from, defaulting to `main`.
    """

    def __init__(
        self,
        repo_path: str,
        clone_url: Optional[str] = None,
        branch: Optional[str] = "main",
        file_filter: Optional[Callable[[str], bool]] = None,
    ):
        self.repo_path = repo_path
        self.clone_url = clone_url
        self.branch = branch
        self.file_filter = file_filter

    def load(self) -> List[Document]:
        try:
            from git import Blob, Repo  # type: ignore
        except ImportError as ex:
            raise ImportError(
                "Could not import git python package. "
                "Please install it with `pip install GitPython`."
            ) from ex

        if not os.path.exists(self.repo_path) and self.clone_url is None:
            raise ValueError(f"Path {self.repo_path} does not exist")
        elif self.clone_url:
            repo = Repo.clone_from(self.clone_url, self.repo_path)
            repo.git.checkout(self.branch)
        else:
            repo = Repo(self.repo_path)
            repo.git.checkout(self.branch)
        docs: List[Document] = []

        for item in repo.tree().traverse():
            if not isinstance(item, Blob):
                continue

            file_path = os.path.join(self.repo_path, item.path)

            ignored_files = repo.ignored([file_path])  # type: ignore
            if len(ignored_files):
                continue

            # uses filter to skip files
            if self.file_filter and not self.file_filter(file_path):
                continue

            rel_file_path = os.path.relpath(file_path, self.repo_path)
            try:
                with open(file_path, "rb") as f:
                    content = f.read()
                    file_type = os.path.splitext(item.name)[1]

                    # loads only text files
                    try:
                        text_content = content.decode("utf-8")
                    except UnicodeDecodeError:
                        continue

                    metadata = {
                        "source": rel_file_path,
                        "file_path": rel_file_path,
                        "file_name": item.name,
                        "file_type": file_type,
                    }
                    doc = Document(page_content=text_content, metadata=metadata)
                    docs.append(doc)
            except Exception as e:
                print(f"Error reading file {file_path}: {e}")

        return docs
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/git.html
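A usage sketch for `GitLoader` (assumptions: `GitPython` is installed; the clone URL, checkout path, and branch are placeholders):

from langchain.document_loaders import GitLoader

loader = GitLoader(
    repo_path="./example_repo",  # local checkout location (placeholder)
    clone_url="https://github.com/hwchase17/langchain",  # omit to load an existing repo
    branch="master",
    file_filter=lambda path: path.endswith(".py"),  # keep only Python sources
)
docs = loader.load()
print(len(docs), docs[0].metadata["file_path"])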
Source code for langchain.document_loaders.recursive_url_loader

from typing import Iterator, List, Optional, Set
from urllib.parse import urlparse

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class RecursiveUrlLoader(BaseLoader):
    """Loader that loads all child links from a given url."""

    def __init__(self, url: str, exclude_dirs: Optional[str] = None) -> None:
        """Initialize with URL to crawl and any sub-directories to exclude."""
        self.url = url
        self.exclude_dirs = exclude_dirs

    def get_child_links_recursive(
        self, url: str, visited: Optional[Set[str]] = None
    ) -> Set[str]:
        """Recursively get all child links starting with the path of the input URL."""
        try:
            from bs4 import BeautifulSoup
        except ImportError:
            raise ImportError(
                "The BeautifulSoup package is required for the RecursiveUrlLoader."
            )

        # Construct the base and parent URLs
        parsed_url = urlparse(url)
        base_url = f"{parsed_url.scheme}://{parsed_url.netloc}"
        parent_url = "/".join(parsed_url.path.split("/")[:-1])
        current_path = parsed_url.path

        # Add a trailing slash if not present
        if not base_url.endswith("/"):
            base_url += "/"
        if not parent_url.endswith("/"):
            parent_url += "/"

        # Exclude the root and parent from list
        visited = set() if visited is None else visited

        # Exclude the links that start with any of the excluded directories
        if self.exclude_dirs and any(
            url.startswith(exclude_dir) for exclude_dir in self.exclude_dirs
        ):
            return visited
        # Get all links that are relative to the root of the website
        response = requests.get(url)
        soup = BeautifulSoup(response.text, "html.parser")
        all_links = [link.get("href") for link in soup.find_all("a")]

        # Extract only the links that are children of the current URL
        child_links = list(
            {
                link
                for link in all_links
                if link and link.startswith(current_path) and link != current_path
            }
        )

        # Get absolute path for all root relative links listed
        absolute_paths = [
            f"{urlparse(base_url).scheme}://{urlparse(base_url).netloc}{link}"
            for link in child_links
        ]

        # Store the visited links and recursively visit the children
        for link in absolute_paths:
            # Check all unvisited links
            if link not in visited:
                visited.add(link)
                # If the link is a directory (w/ children) then visit it
                if link.endswith("/"):
                    visited.update(self.get_child_links_recursive(link, visited))

        return visited

    def lazy_load(self) -> Iterator[Document]:
        """Lazy load web pages."""
        from langchain.document_loaders import WebBaseLoader

        child_links = self.get_child_links_recursive(self.url)
        loader = WebBaseLoader(list(child_links))
        return loader.lazy_load()

    def load(self) -> List[Document]:
        """Load web pages."""
        return list(self.lazy_load())
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/recursive_url_loader.html
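A usage sketch for `RecursiveUrlLoader` (assumptions: `requests` and `beautifulsoup4` are installed; the URL is illustrative):

from langchain.document_loaders import RecursiveUrlLoader

# The crawler only follows links whose path starts with the given URL's path,
# and recurses into links that end with a trailing slash.
loader = RecursiveUrlLoader(url="https://docs.python.org/3.9/")
docs = loader.load()
print(len(docs))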
Source code for langchain.document_loaders.obsidian

"""Loader that loads Obsidian directory dump."""
import re
from pathlib import Path
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class ObsidianLoader(BaseLoader):
    """Loader that loads Obsidian files from disk."""

    FRONT_MATTER_REGEX = re.compile(r"^---\n(.*?)\n---\n", re.MULTILINE | re.DOTALL)

    def __init__(
        self, path: str, encoding: str = "UTF-8", collect_metadata: bool = True
    ):
        """Initialize with path."""
        self.file_path = path
        self.encoding = encoding
        self.collect_metadata = collect_metadata

    def _parse_front_matter(self, content: str) -> dict:
        """Parse front matter metadata from the content and return it as a dict."""
        if not self.collect_metadata:
            return {}
        match = self.FRONT_MATTER_REGEX.search(content)
        front_matter = {}
        if match:
            lines = match.group(1).split("\n")
            for line in lines:
                if ":" in line:
                    key, value = line.split(":", 1)
                    front_matter[key.strip()] = value.strip()
                else:
                    # Skip lines without a colon
                    continue
        return front_matter

    def _remove_front_matter(self, content: str) -> str:
        """Remove front matter metadata from the given content."""
        if not self.collect_metadata:
            return content
        return self.FRONT_MATTER_REGEX.sub("", content)

    def load(self) -> List[Document]:
        """Load documents."""
        ps = list(Path(self.file_path).glob("**/*.md"))
        docs = []
        for p in ps:
            with open(p, encoding=self.encoding) as f:
                text = f.read()

            front_matter = self._parse_front_matter(text)
            text = self._remove_front_matter(text)
            metadata = {
                "source": str(p.name),
                "path": str(p),
                "created": p.stat().st_ctime,
                "last_modified": p.stat().st_mtime,
                "last_accessed": p.stat().st_atime,
                **front_matter,
            }
            docs.append(Document(page_content=text, metadata=metadata))

        return docs
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/obsidian.html
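A usage sketch for `ObsidianLoader` (the vault path is a placeholder):

from langchain.document_loaders import ObsidianLoader

# YAML-style front matter in each note is merged into the document metadata
# unless collect_metadata=False.
loader = ObsidianLoader("/path/to/obsidian/vault")
docs = loader.load()
print(docs[0].metadata)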
Source code for langchain.document_loaders.json_loader """Loader that loads data from JSON.""" import json from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Union from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class JSONLoader(BaseLoader): """Loads a JSON file and references a jq schema provided to load the text into documents. Example: [{"text": ...}, {"text": ...}, {"text": ...}] -> schema = .[].text {"key": [{"text": ...}, {"text": ...}, {"text": ...}]} -> schema = .key[].text ["", "", ""] -> schema = .[] """ def __init__( self, file_path: Union[str, Path], jq_schema: str, content_key: Optional[str] = None, metadata_func: Optional[Callable[[Dict, Dict], Dict]] = None, text_content: bool = True, ): """Initialize the JSONLoader. Args: file_path (Union[str, Path]): The path to the JSON file. jq_schema (str): The jq schema to use to extract the data or text from the JSON. content_key (str): The key to use to extract the content from the JSON if the jq_schema results to a list of objects (dict). metadata_func (Callable[Dict, Dict]): A function that takes in the JSON object extracted by the jq_schema and the default metadata and returns a dict of the updated metadata. text_content (bool): Boolean flag to indicates whether the content is in string format, default to True """ try: import jq # noqa:F401
""" try: import jq # noqa:F401 except ImportError: raise ImportError( "jq package not found, please install it with `pip install jq`" ) self.file_path = Path(file_path).resolve() self._jq_schema = jq.compile(jq_schema) self._content_key = content_key self._metadata_func = metadata_func self._text_content = text_content [docs] def load(self) -> List[Document]: """Load and return documents from the JSON file.""" data = self._jq_schema.input(json.loads(self.file_path.read_text())) # Perform some validation # This is not a perfect validation, but it should catch most cases # and prevent the user from getting a cryptic error later on. if self._content_key is not None: self._validate_content_key(data) docs = [] for i, sample in enumerate(data, 1): metadata = dict( source=str(self.file_path), seq_num=i, ) text = self._get_text(sample=sample, metadata=metadata) docs.append(Document(page_content=text, metadata=metadata)) return docs def _get_text(self, sample: Any, metadata: dict) -> str: """Convert sample to string format""" if self._content_key is not None: content = sample.get(self._content_key) if self._metadata_func is not None: # We pass in the metadata dict to the metadata_func # so that the user can customize the default metadata # based on the content of the JSON object. metadata = self._metadata_func(sample, metadata) else: content = sample
else: content = sample if self._text_content and not isinstance(content, str): raise ValueError( f"Expected page_content is string, got {type(content)} instead. \ Set `text_content=False` if the desired input for \ `page_content` is not a string" ) # In case the text is None, set it to an empty string elif isinstance(content, str): return content elif isinstance(content, dict): return json.dumps(content) if content else "" else: return str(content) if content is not None else "" def _validate_content_key(self, data: Any) -> None: """Check if content key is valid""" sample = data.first() if not isinstance(sample, dict): raise ValueError( f"Expected the jq schema to result in a list of objects (dict), \ so sample must be a dict but got `{type(sample)}`" ) if sample.get(self._content_key) is None: raise ValueError( f"Expected the jq schema to result in a list of objects (dict) \ with the key `{self._content_key}`" ) if self._metadata_func is not None: sample_metadata = self._metadata_func(sample, {}) if not isinstance(sample_metadata, dict): raise ValueError( f"Expected the metadata_func to return a dict but got \ `{type(sample_metadata)}`" )
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/json_loader.html
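A usage sketch for `JSONLoader` (assumptions: the `jq` package is installed; the toy file is written here only to keep the example self-contained):

import json
from pathlib import Path

from langchain.document_loaders import JSONLoader

Path("chat.json").write_text(
    json.dumps({"messages": [{"text": "hello"}, {"text": "world"}]})
)

# `.messages[]` yields each message object; `content_key` selects the text field.
loader = JSONLoader(file_path="chat.json", jq_schema=".messages[]", content_key="text")
docs = loader.load()
print([d.page_content for d in docs])  # ['hello', 'world']
print(docs[0].metadata)  # source path and seq_num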
Source code for langchain.document_loaders.whatsapp_chat

import re
from pathlib import Path
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


def concatenate_rows(date: str, sender: str, text: str) -> str:
    """Combine message information in a readable format ready to be used."""
    return f"{sender} on {date}: {text}\n\n"


class WhatsAppChatLoader(BaseLoader):
    """Loader that loads WhatsApp messages text file."""

    def __init__(self, path: str):
        """Initialize with path."""
        self.file_path = path

    def load(self) -> List[Document]:
        """Load documents."""
        p = Path(self.file_path)
        text_content = ""

        with open(p, encoding="utf8") as f:
            lines = f.readlines()

        message_line_regex = r"""
            \[?
            (
                \d{1,4}
                [\/.]
                \d{1,2}
                [\/.]
                \d{1,4}
                ,\s
                \d{1,2}
                :\d{2}
                (?:
                    :\d{2}
                )?
                (?:[\s_](?:AM|PM))?
            )
            \]?
            [\s-]*
            ([~\w\s]+)
            [:]+
            \s
            (.+)
        """
        ignore_lines = ["This message was deleted", "<Media omitted>"]
        for line in lines:
            result = re.match(
                message_line_regex, line.strip(), flags=re.VERBOSE | re.IGNORECASE
            )
            if result:
                date, sender, text = result.groups()
                if text not in ignore_lines:
                    text_content += concatenate_rows(date, sender, text)

        metadata = {"source": str(p)}

        return [Document(page_content=text_content, metadata=metadata)]
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/whatsapp_chat.html
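A usage sketch for `WhatsAppChatLoader` (the export path is a placeholder; the file is the plain-text output of WhatsApp's "Export chat" feature):

from langchain.document_loaders import WhatsAppChatLoader

loader = WhatsAppChatLoader("./whatsapp_chat.txt")
docs = loader.load()
# All parsed messages end up in a single Document, one
# "<sender> on <date>: <text>" block per message.
print(docs[0].page_content[:200])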
Source code for langchain.document_loaders.youtube """Loader that loads YouTube transcript.""" from __future__ import annotations import logging from pathlib import Path from typing import Any, Dict, List, Optional, Sequence, Union from urllib.parse import parse_qs, urlparse from pydantic import root_validator from pydantic.dataclasses import dataclass from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader logger = logging.getLogger(__name__) SCOPES = ["https://www.googleapis.com/auth/youtube.readonly"] [docs]@dataclass class GoogleApiClient: """A Generic Google Api Client. To use, you should have the ``google_auth_oauthlib,youtube_transcript_api,google`` python package installed. As the google api expects credentials you need to set up a google account and register your Service. "https://developers.google.com/docs/api/quickstart/python" Example: .. code-block:: python from langchain.document_loaders import GoogleApiClient google_api_client = GoogleApiClient( service_account_path=Path("path_to_your_sec_file.json") ) """ credentials_path: Path = Path.home() / ".credentials" / "credentials.json" service_account_path: Path = Path.home() / ".credentials" / "credentials.json" token_path: Path = Path.home() / ".credentials" / "token.json" def __post_init__(self) -> None: self.creds = self._load_credentials() [docs] @root_validator def validate_channel_or_videoIds_is_set( cls, values: Dict[str, Any] ) -> Dict[str, Any]: """Validate that either folder_id or document_ids is set, but not both."""
"""Validate that either folder_id or document_ids is set, but not both.""" if not values.get("credentials_path") and not values.get( "service_account_path" ): raise ValueError("Must specify either channel_name or video_ids") return values def _load_credentials(self) -> Any: """Load credentials.""" # Adapted from https://developers.google.com/drive/api/v3/quickstart/python try: from google.auth.transport.requests import Request from google.oauth2 import service_account from google.oauth2.credentials import Credentials from google_auth_oauthlib.flow import InstalledAppFlow from youtube_transcript_api import YouTubeTranscriptApi # noqa: F401 except ImportError: raise ImportError( "You must run" "`pip install --upgrade " "google-api-python-client google-auth-httplib2 " "google-auth-oauthlib " "youtube-transcript-api` " "to use the Google Drive loader" ) creds = None if self.service_account_path.exists(): return service_account.Credentials.from_service_account_file( str(self.service_account_path) ) if self.token_path.exists(): creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES) if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( str(self.credentials_path), SCOPES ) creds = flow.run_local_server(port=0) with open(self.token_path, "w") as token: token.write(creds.to_json()) return creds
token.write(creds.to_json()) return creds ALLOWED_SCHEMAS = {"http", "https"} ALLOWED_NETLOCK = { "youtu.be", "m.youtube.com", "youtube.com", "www.youtube.com", "www.youtube-nocookie.com", "vid.plus", } def _parse_video_id(url: str) -> Optional[str]: """Parse a youtube url and return the video id if valid, otherwise None.""" parsed_url = urlparse(url) if parsed_url.scheme not in ALLOWED_SCHEMAS: return None if parsed_url.netloc not in ALLOWED_NETLOCK: return None path = parsed_url.path if path.endswith("/watch"): query = parsed_url.query parsed_query = parse_qs(query) if "v" in parsed_query: ids = parsed_query["v"] video_id = ids if isinstance(ids, str) else ids[0] else: return None else: path = parsed_url.path.lstrip("/") video_id = path.split("/")[-1] if len(video_id) != 11: # Video IDs are 11 characters long return None return video_id [docs]class YoutubeLoader(BaseLoader): """Loader that loads Youtube transcripts.""" def __init__( self, video_id: str, add_video_info: bool = False, language: Union[str, Sequence[str]] = "en", translation: str = "en", continue_on_failure: bool = False, ): """Initialize with YouTube video ID.""" self.video_id = video_id self.add_video_info = add_video_info self.language = language
self.add_video_info = add_video_info self.language = language if isinstance(language, str): self.language = [language] else: self.language = language self.translation = translation self.continue_on_failure = continue_on_failure [docs] @staticmethod def extract_video_id(youtube_url: str) -> str: """Extract video id from common YT urls.""" video_id = _parse_video_id(youtube_url) if not video_id: raise ValueError( f"Could not determine the video ID for the URL {youtube_url}" ) return video_id [docs] @classmethod def from_youtube_url(cls, youtube_url: str, **kwargs: Any) -> YoutubeLoader: """Given youtube URL, load video.""" video_id = cls.extract_video_id(youtube_url) return cls(video_id, **kwargs) [docs] def load(self) -> List[Document]: """Load documents.""" try: from youtube_transcript_api import ( NoTranscriptFound, TranscriptsDisabled, YouTubeTranscriptApi, ) except ImportError: raise ImportError( "Could not import youtube_transcript_api python package. " "Please install it with `pip install youtube-transcript-api`." ) metadata = {"source": self.video_id} if self.add_video_info: # Get more video meta info # Such as title, description, thumbnail url, publish_date video_info = self._get_video_info() metadata.update(video_info) try: transcript_list = YouTubeTranscriptApi.list_transcripts(self.video_id) except TranscriptsDisabled: return [] try:
except TranscriptsDisabled: return [] try: transcript = transcript_list.find_transcript(self.language) except NoTranscriptFound: en_transcript = transcript_list.find_transcript(["en"]) transcript = en_transcript.translate(self.translation) transcript_pieces = transcript.fetch() transcript = " ".join([t["text"].strip(" ") for t in transcript_pieces]) return [Document(page_content=transcript, metadata=metadata)] def _get_video_info(self) -> dict: """Get important video information. Components are: - title - description - thumbnail url, - publish_date - channel_author - and more. """ try: from pytube import YouTube except ImportError: raise ImportError( "Could not import pytube python package. " "Please install it with `pip install pytube`." ) yt = YouTube(f"https://www.youtube.com/watch?v={self.video_id}") video_info = { "title": yt.title or "Unknown", "description": yt.description or "Unknown", "view_count": yt.views or 0, "thumbnail_url": yt.thumbnail_url or "Unknown", "publish_date": yt.publish_date.strftime("%Y-%m-%d %H:%M:%S") if yt.publish_date else "Unknown", "length": yt.length or 0, "author": yt.author or "Unknown", } return video_info [docs]@dataclass class GoogleApiYoutubeLoader(BaseLoader): """Loader that loads all Videos from a Channel To use, you should have the ``googleapiclient,youtube_transcript_api``
To use, you should have the ``googleapiclient,youtube_transcript_api`` python package installed. As the service needs a google_api_client, you first have to initialize the GoogleApiClient. Additionally you have to either provide a channel name or a list of videoids "https://developers.google.com/docs/api/quickstart/python" Example: .. code-block:: python from langchain.document_loaders import GoogleApiClient from langchain.document_loaders import GoogleApiYoutubeLoader google_api_client = GoogleApiClient( service_account_path=Path("path_to_your_sec_file.json") ) loader = GoogleApiYoutubeLoader( google_api_client=google_api_client, channel_name = "CodeAesthetic" ) load.load() """ google_api_client: GoogleApiClient channel_name: Optional[str] = None video_ids: Optional[List[str]] = None add_video_info: bool = True captions_language: str = "en" continue_on_failure: bool = False def __post_init__(self) -> None: self.youtube_client = self._build_youtube_client(self.google_api_client.creds) def _build_youtube_client(self, creds: Any) -> Any: try: from googleapiclient.discovery import build from youtube_transcript_api import YouTubeTranscriptApi # noqa: F401 except ImportError: raise ImportError( "You must run" "`pip install --upgrade " "google-api-python-client google-auth-httplib2 " "google-auth-oauthlib " "youtube-transcript-api` " "to use the Google Drive loader" )
"to use the Google Drive loader" ) return build("youtube", "v3", credentials=creds) [docs] @root_validator def validate_channel_or_videoIds_is_set( cls, values: Dict[str, Any] ) -> Dict[str, Any]: """Validate that either folder_id or document_ids is set, but not both.""" if not values.get("channel_name") and not values.get("video_ids"): raise ValueError("Must specify either channel_name or video_ids") return values def _get_transcripe_for_video_id(self, video_id: str) -> str: from youtube_transcript_api import NoTranscriptFound, YouTubeTranscriptApi transcript_list = YouTubeTranscriptApi.list_transcripts(video_id) try: transcript = transcript_list.find_transcript([self.captions_language]) except NoTranscriptFound: for available_transcript in transcript_list: transcript = available_transcript.translate(self.captions_language) continue transcript_pieces = transcript.fetch() return " ".join([t["text"].strip(" ") for t in transcript_pieces]) def _get_document_for_video_id(self, video_id: str, **kwargs: Any) -> Document: captions = self._get_transcripe_for_video_id(video_id) video_response = ( self.youtube_client.videos() .list( part="id,snippet", id=video_id, ) .execute() ) return Document( page_content=captions, metadata=video_response.get("items")[0], ) def _get_channel_id(self, channel_name: str) -> str: request = self.youtube_client.search().list(
request = self.youtube_client.search().list( part="id", q=channel_name, type="channel", maxResults=1, # we only need one result since channel names are unique ) response = request.execute() channel_id = response["items"][0]["id"]["channelId"] return channel_id def _get_document_for_channel(self, channel: str, **kwargs: Any) -> List[Document]: try: from youtube_transcript_api import ( NoTranscriptFound, TranscriptsDisabled, ) except ImportError: raise ImportError( "You must run" "`pip install --upgrade " "youtube-transcript-api` " "to use the youtube loader" ) channel_id = self._get_channel_id(channel) request = self.youtube_client.search().list( part="id,snippet", channelId=channel_id, maxResults=50, # adjust this value to retrieve more or fewer videos ) video_ids = [] while request is not None: response = request.execute() # Add each video ID to the list for item in response["items"]: if not item["id"].get("videoId"): continue meta_data = {"videoId": item["id"]["videoId"]} if self.add_video_info: item["snippet"].pop("thumbnails") meta_data.update(item["snippet"]) try: page_content = self._get_transcripe_for_video_id( item["id"]["videoId"] ) video_ids.append( Document( page_content=page_content, metadata=meta_data, ) )
except (TranscriptsDisabled, NoTranscriptFound) as e: if self.continue_on_failure: logger.error( "Error fetching transcript " + f"{item['id']['videoId']}, exception: {e}" ) else: raise e pass request = self.youtube_client.search().list_next(request, response) return video_ids [docs] def load(self) -> List[Document]: """Load documents.""" document_list = [] if self.channel_name: document_list.extend(self._get_document_for_channel(self.channel_name)) elif self.video_ids: document_list.extend( [ self._get_document_for_video_id(video_id) for video_id in self.video_ids ] ) else: raise ValueError("Must specify either channel_name or video_ids") return document_list
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/youtube.html
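A minimal usage sketch for GoogleApiYoutubeLoader using the video_ids path instead of a channel name; the service-account file and video id below are placeholders:

.. code-block:: python

    from pathlib import Path

    from langchain.document_loaders import GoogleApiClient, GoogleApiYoutubeLoader

    google_api_client = GoogleApiClient(service_account_path=Path("sa_key.json"))  # placeholder path
    loader = GoogleApiYoutubeLoader(
        google_api_client=google_api_client,
        video_ids=["dQw4w9WgXcQ"],  # placeholder video id
        captions_language="en",
        continue_on_failure=True,
    )
    docs = loader.load()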
Source code for langchain.document_loaders.gutenberg """Loader that loads .txt web files.""" from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class GutenbergLoader(BaseLoader): """Loader that uses urllib to load .txt web files.""" def __init__(self, file_path: str): """Initialize with file path.""" if not file_path.startswith("https://www.gutenberg.org"): raise ValueError("file path must start with 'https://www.gutenberg.org'") if not file_path.endswith(".txt"): raise ValueError("file path must end with '.txt'") self.file_path = file_path [docs] def load(self) -> List[Document]: """Load file.""" from urllib.request import urlopen elements = urlopen(self.file_path) text = "\n\n".join([str(el.decode("utf-8-sig")) for el in elements]) metadata = {"source": self.file_path} return [Document(page_content=text, metadata=metadata)]
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/gutenberg.html
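A minimal usage sketch for GutenbergLoader; the book URL is a placeholder for any plain-text file hosted on gutenberg.org:

.. code-block:: python

    from langchain.document_loaders.gutenberg import GutenbergLoader

    loader = GutenbergLoader("https://www.gutenberg.org/cache/epub/1342/pg1342.txt")  # placeholder URL
    docs = loader.load()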
Source code for langchain.document_loaders.reddit """Reddit document loader.""" from __future__ import annotations from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader if TYPE_CHECKING: import praw def _dependable_praw_import() -> praw: try: import praw except ImportError: raise ValueError( "praw package not found, please install it with `pip install praw`" ) return praw [docs]class RedditPostsLoader(BaseLoader): """Reddit posts loader. Read posts on a subreddit. First you need to go to https://www.reddit.com/prefs/apps/ and create your application """ def __init__( self, client_id: str, client_secret: str, user_agent: str, search_queries: Sequence[str], mode: str, categories: Sequence[str] = ["new"], number_posts: Optional[int] = 10, ): self.client_id = client_id self.client_secret = client_secret self.user_agent = user_agent self.search_queries = search_queries self.mode = mode self.categories = categories self.number_posts = number_posts [docs] def load(self) -> List[Document]: """Load reddits.""" praw = _dependable_praw_import() reddit = praw.Reddit( client_id=self.client_id, client_secret=self.client_secret, user_agent=self.user_agent, ) results: List[Document] = [] if self.mode == "subreddit": for search_query in self.search_queries:
if self.mode == "subreddit": for search_query in self.search_queries: for category in self.categories: docs = self._subreddit_posts_loader( search_query=search_query, category=category, reddit=reddit ) results.extend(docs) elif self.mode == "username": for search_query in self.search_queries: for category in self.categories: docs = self._user_posts_loader( search_query=search_query, category=category, reddit=reddit ) results.extend(docs) else: raise ValueError( "mode not correct, please enter 'username' or 'subreddit' as mode" ) return results def _subreddit_posts_loader( self, search_query: str, category: str, reddit: praw.reddit.Reddit ) -> Iterable[Document]: subreddit = reddit.subreddit(search_query) method = getattr(subreddit, category) cat_posts = method(limit=self.number_posts) """Format reddit posts into a string.""" for post in cat_posts: metadata = { "post_subreddit": post.subreddit_name_prefixed, "post_category": category, "post_title": post.title, "post_score": post.score, "post_id": post.id, "post_url": post.url, "post_author": post.author, } yield Document( page_content=post.selftext, metadata=metadata, ) def _user_posts_loader( self, search_query: str, category: str, reddit: praw.reddit.Reddit ) -> Iterable[Document]: user = reddit.redditor(search_query) method = getattr(user.submissions, category)
cat_posts = method(limit=self.number_posts) """Format reddit posts into a string.""" for post in cat_posts: metadata = { "post_subreddit": post.subreddit_name_prefixed, "post_category": category, "post_title": post.title, "post_score": post.score, "post_id": post.id, "post_url": post.url, "post_author": post.author, } yield Document( page_content=post.selftext, metadata=metadata, )
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/reddit.html
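A usage sketch for RedditPostsLoader, assuming you have created a Reddit app; the credentials and the subreddit name are placeholders:

.. code-block:: python

    from langchain.document_loaders.reddit import RedditPostsLoader

    loader = RedditPostsLoader(
        client_id="YOUR_CLIENT_ID",            # placeholder credentials
        client_secret="YOUR_CLIENT_SECRET",
        user_agent="doc-loader by u/yourname",
        search_queries=["LangChain"],          # subreddit names (or usernames in "username" mode)
        mode="subreddit",
        categories=["new", "hot"],
        number_posts=5,
    )
    docs = loader.load()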
Source code for langchain.document_loaders.figma """Loader that loads Figma files json dump.""" import json import urllib.request from typing import Any, List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utils import stringify_dict [docs]class FigmaFileLoader(BaseLoader): """Loader that loads Figma file json.""" def __init__(self, access_token: str, ids: str, key: str): """Initialize with access token, ids, and key.""" self.access_token = access_token self.ids = ids self.key = key def _construct_figma_api_url(self) -> str: api_url = "https://api.figma.com/v1/files/%s/nodes?ids=%s" % ( self.key, self.ids, ) return api_url def _get_figma_file(self) -> Any: """Get Figma file from Figma REST API.""" headers = {"X-Figma-Token": self.access_token} request = urllib.request.Request( self._construct_figma_api_url(), headers=headers ) with urllib.request.urlopen(request) as response: json_data = json.loads(response.read().decode()) return json_data [docs] def load(self) -> List[Document]: """Load file""" data = self._get_figma_file() text = stringify_dict(data) metadata = {"source": self._construct_figma_api_url()} return [Document(page_content=text, metadata=metadata)]
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/figma.html
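A usage sketch for FigmaFileLoader; the access token, node ids, and file key are placeholders taken from your own Figma file URL:

.. code-block:: python

    from langchain.document_loaders.figma import FigmaFileLoader

    loader = FigmaFileLoader(
        access_token="YOUR_FIGMA_TOKEN",  # placeholder personal access token
        ids="1:2,1:3",                    # placeholder node ids
        key="FILE_KEY",                   # placeholder file key
    )
    docs = loader.load()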
Source code for langchain.document_loaders.modern_treasury """Loader that fetches data from Modern Treasury""" import json import urllib.request from base64 import b64encode from typing import List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utils import get_from_env, stringify_value MODERN_TREASURY_ENDPOINTS = { "payment_orders": "https://app.moderntreasury.com/api/payment_orders", "expected_payments": "https://app.moderntreasury.com/api/expected_payments", "returns": "https://app.moderntreasury.com/api/returns", "incoming_payment_details": "https://app.moderntreasury.com/api/\ incoming_payment_details", "counterparties": "https://app.moderntreasury.com/api/counterparties", "internal_accounts": "https://app.moderntreasury.com/api/internal_accounts", "external_accounts": "https://app.moderntreasury.com/api/external_accounts", "transactions": "https://app.moderntreasury.com/api/transactions", "ledgers": "https://app.moderntreasury.com/api/ledgers", "ledger_accounts": "https://app.moderntreasury.com/api/ledger_accounts", "ledger_transactions": "https://app.moderntreasury.com/api/ledger_transactions", "events": "https://app.moderntreasury.com/api/events", "invoices": "https://app.moderntreasury.com/api/invoices", } [docs]class ModernTreasuryLoader(BaseLoader): """Loader that fetches data from Modern Treasury.""" def __init__( self, resource: str,
def __init__( self, resource: str, organization_id: Optional[str] = None, api_key: Optional[str] = None, ) -> None: self.resource = resource organization_id = organization_id or get_from_env( "organization_id", "MODERN_TREASURY_ORGANIZATION_ID" ) api_key = api_key or get_from_env("api_key", "MODERN_TREASURY_API_KEY") credentials = f"{organization_id}:{api_key}".encode("utf-8") basic_auth_token = b64encode(credentials).decode("utf-8") self.headers = {"Authorization": f"Basic {basic_auth_token}"} def _make_request(self, url: str) -> List[Document]: request = urllib.request.Request(url, headers=self.headers) with urllib.request.urlopen(request) as response: json_data = json.loads(response.read().decode()) text = stringify_value(json_data) metadata = {"source": url} return [Document(page_content=text, metadata=metadata)] def _get_resource(self) -> List[Document]: endpoint = MODERN_TREASURY_ENDPOINTS.get(self.resource) if endpoint is None: return [] return self._make_request(endpoint) [docs] def load(self) -> List[Document]: return self._get_resource()
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/modern_treasury.html
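A usage sketch for ModernTreasuryLoader; the resource must be a key of MODERN_TREASURY_ENDPOINTS above, and the credentials are placeholders (they can also come from the environment variables read in __init__):

.. code-block:: python

    from langchain.document_loaders.modern_treasury import ModernTreasuryLoader

    loader = ModernTreasuryLoader(
        "payment_orders",               # any key from MODERN_TREASURY_ENDPOINTS
        organization_id="YOUR_ORG_ID",  # placeholder credentials
        api_key="YOUR_API_KEY",
    )
    docs = loader.load()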
Source code for langchain.document_loaders.fauna from typing import Iterator, List, Optional, Sequence from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class FaunaLoader(BaseLoader): """FaunaDB Loader. Attributes: query (str): The FQL query string to execute. page_content_field (str): The field that contains the content of each page. secret (str): The secret key for authenticating to FaunaDB. metadata_fields (Optional[Sequence[str]]): Optional list of field names to include in metadata. """ def __init__( self, query: str, page_content_field: str, secret: str, metadata_fields: Optional[Sequence[str]] = None, ): self.query = query self.page_content_field = page_content_field self.secret = secret self.metadata_fields = metadata_fields [docs] def load(self) -> List[Document]: return list(self.lazy_load()) [docs] def lazy_load(self) -> Iterator[Document]: try: from fauna import Page, fql from fauna.client import Client from fauna.encoding import QuerySuccess except ImportError: raise ImportError( "Could not import fauna python package. " "Please install it with `pip install fauna`." ) # Create Fauna Client client = Client(secret=self.secret) # Run FQL Query response: QuerySuccess = client.query(fql(self.query)) page: Page = response.data for result in page: if result is not None: document_dict = dict(result.items()) page_content = ""
for key, value in document_dict.items(): if key == self.page_content_field: page_content = value document: Document = Document( page_content=page_content, metadata={"id": result.id, "ts": result.ts}, ) yield document if page.after is not None: yield Document( page_content="Next Page Exists", metadata={"after": page.after}, )
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/fauna.html
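A usage sketch for FaunaLoader; the FQL query, field name, and secret are placeholders for your own collection:

.. code-block:: python

    from langchain.document_loaders.fauna import FaunaLoader

    loader = FaunaLoader(
        query="Item.all()",          # placeholder FQL query
        page_content_field="text",   # placeholder field copied into page_content
        secret="YOUR_FAUNA_SECRET",  # placeholder key
    )
    for doc in loader.lazy_load():
        print(doc.metadata["id"], doc.page_content[:80])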
Source code for langchain.document_loaders.excel """Loader that loads Microsoft Excel files.""" from typing import Any, List from langchain.document_loaders.unstructured import ( UnstructuredFileLoader, validate_unstructured_version, ) [docs]class UnstructuredExcelLoader(UnstructuredFileLoader): """Loader that uses unstructured to load Microsoft Excel files.""" def __init__( self, file_path: str, mode: str = "single", **unstructured_kwargs: Any ): validate_unstructured_version(min_unstructured_version="0.6.7") super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs) def _get_elements(self) -> List: from unstructured.partition.xlsx import partition_xlsx return partition_xlsx(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/excel.html
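A usage sketch for UnstructuredExcelLoader (requires unstructured>=0.6.7 as validated above); the file path is a placeholder:

.. code-block:: python

    from langchain.document_loaders.excel import UnstructuredExcelLoader

    # mode="elements" yields one Document per detected element instead of a single blob
    loader = UnstructuredExcelLoader("report.xlsx", mode="elements")  # placeholder path
    docs = loader.load()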
Source code for langchain.document_loaders.directory """Loading logic for loading documents from a directory.""" import concurrent import logging from pathlib import Path from typing import Any, List, Optional, Type, Union from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.html_bs import BSHTMLLoader from langchain.document_loaders.text import TextLoader from langchain.document_loaders.unstructured import UnstructuredFileLoader FILE_LOADER_TYPE = Union[ Type[UnstructuredFileLoader], Type[TextLoader], Type[BSHTMLLoader] ] logger = logging.getLogger(__name__) def _is_visible(p: Path) -> bool: parts = p.parts for _p in parts: if _p.startswith("."): return False return True [docs]class DirectoryLoader(BaseLoader): """Loading logic for loading documents from a directory.""" def __init__( self, path: str, glob: str = "**/[!.]*", silent_errors: bool = False, load_hidden: bool = False, loader_cls: FILE_LOADER_TYPE = UnstructuredFileLoader, loader_kwargs: Union[dict, None] = None, recursive: bool = False, show_progress: bool = False, use_multithreading: bool = False, max_concurrency: int = 4, ): """Initialize with path to directory and how to glob over it.""" if loader_kwargs is None: loader_kwargs = {} self.path = path self.glob = glob self.load_hidden = load_hidden self.loader_cls = loader_cls self.loader_kwargs = loader_kwargs self.silent_errors = silent_errors
self.loader_kwargs = loader_kwargs self.silent_errors = silent_errors self.recursive = recursive self.show_progress = show_progress self.use_multithreading = use_multithreading self.max_concurrency = max_concurrency [docs] def load_file( self, item: Path, path: Path, docs: List[Document], pbar: Optional[Any] ) -> None: if item.is_file(): if _is_visible(item.relative_to(path)) or self.load_hidden: try: sub_docs = self.loader_cls(str(item), **self.loader_kwargs).load() docs.extend(sub_docs) except Exception as e: if self.silent_errors: logger.warning(e) else: raise e finally: if pbar: pbar.update(1) [docs] def load(self) -> List[Document]: """Load documents.""" p = Path(self.path) if not p.exists(): raise FileNotFoundError(f"Directory not found: '{self.path}'") if not p.is_dir(): raise ValueError(f"Expected directory, got file: '{self.path}'") docs: List[Document] = [] items = list(p.rglob(self.glob) if self.recursive else p.glob(self.glob)) pbar = None if self.show_progress: try: from tqdm import tqdm pbar = tqdm(total=len(items)) except ImportError as e: logger.warning( "To log the progress of DirectoryLoader you need to install tqdm, " "`pip install tqdm`" ) if self.silent_errors: logger.warning(e) else: raise e
if self.use_multithreading: with concurrent.futures.ThreadPoolExecutor( max_workers=self.max_concurrency ) as executor: executor.map(lambda i: self.load_file(i, p, docs, pbar), items) else: for i in items: self.load_file(i, p, docs, pbar) if pbar: pbar.close() return docs
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/directory.html
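A usage sketch for DirectoryLoader that swaps the default UnstructuredFileLoader for TextLoader; the directory and glob are placeholders:

.. code-block:: python

    from langchain.document_loaders.directory import DirectoryLoader
    from langchain.document_loaders.text import TextLoader

    loader = DirectoryLoader(
        "docs/",                 # placeholder directory
        glob="**/*.md",          # placeholder glob
        loader_cls=TextLoader,
        recursive=True,
        show_progress=True,      # needs tqdm installed
        use_multithreading=True,
    )
    docs = loader.load()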
Source code for langchain.document_loaders.facebook_chat """Loader that loads Facebook chat json dump.""" import datetime import json from pathlib import Path from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader def concatenate_rows(row: dict) -> str: """Combine message information in a readable format ready to be used.""" sender = row["sender_name"] text = row["content"] date = datetime.datetime.fromtimestamp(row["timestamp_ms"] / 1000).strftime( "%Y-%m-%d %H:%M:%S" ) return f"{sender} on {date}: {text}\n\n" [docs]class FacebookChatLoader(BaseLoader): """Loader that loads Facebook messages json directory dump.""" def __init__(self, path: str): """Initialize with path.""" self.file_path = path [docs] def load(self) -> List[Document]: """Load documents.""" p = Path(self.file_path) with open(p, encoding="utf8") as f: d = json.load(f) text = "".join( concatenate_rows(message) for message in d["messages"] if message.get("content") and isinstance(message["content"], str) ) metadata = {"source": str(p)} return [Document(page_content=text, metadata=metadata)]
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/facebook_chat.html
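A usage sketch for FacebookChatLoader; the path points at one message_*.json file from a Facebook data export and is a placeholder:

.. code-block:: python

    from langchain.document_loaders.facebook_chat import FacebookChatLoader

    loader = FacebookChatLoader("messages/inbox/chat/message_1.json")  # placeholder path
    docs = loader.load()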
Source code for langchain.document_loaders.email """Loader that loads email files.""" import os from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.unstructured import ( UnstructuredFileLoader, satisfies_min_unstructured_version, ) [docs]class UnstructuredEmailLoader(UnstructuredFileLoader): """Loader that uses unstructured to load email files.""" def _get_elements(self) -> List: from unstructured.file_utils.filetype import FileType, detect_filetype filetype = detect_filetype(self.file_path) if filetype == FileType.EML: from unstructured.partition.email import partition_email return partition_email(filename=self.file_path, **self.unstructured_kwargs) elif satisfies_min_unstructured_version("0.5.8") and filetype == FileType.MSG: from unstructured.partition.msg import partition_msg return partition_msg(filename=self.file_path, **self.unstructured_kwargs) else: raise ValueError( f"Filetype {filetype} is not supported in UnstructuredEmailLoader." ) [docs]class OutlookMessageLoader(BaseLoader): """ Loader that loads Outlook Message files using extract_msg. https://github.com/TeamMsgExtractor/msg-extractor """ def __init__(self, file_path: str): """Initialize with file path.""" self.file_path = file_path if not os.path.isfile(self.file_path): raise ValueError("File path %s is not a valid file" % self.file_path) try: import extract_msg # noqa:F401 except ImportError: raise ImportError( "extract_msg is not installed. Please install it with " "`pip install extract_msg`"
"`pip install extract_msg`" ) [docs] def load(self) -> List[Document]: """Load data into document objects.""" import extract_msg msg = extract_msg.Message(self.file_path) return [ Document( page_content=msg.body, metadata={ "subject": msg.subject, "sender": msg.sender, "date": msg.date, }, ) ]
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/email.html
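A usage sketch for the two email loaders; both file paths are placeholders, and OutlookMessageLoader additionally needs extract_msg installed:

.. code-block:: python

    from langchain.document_loaders.email import OutlookMessageLoader, UnstructuredEmailLoader

    eml_docs = UnstructuredEmailLoader("mail.eml").load()  # placeholder .eml path
    msg_docs = OutlookMessageLoader("mail.msg").load()     # placeholder .msg path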
Source code for langchain.document_loaders.word_document """Loader that loads word documents.""" import os import tempfile from abc import ABC from typing import List from urllib.parse import urlparse import requests from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.unstructured import UnstructuredFileLoader [docs]class Docx2txtLoader(BaseLoader, ABC): """Loads a DOCX with docx2txt and chunks at character level. Defaults to check for local file, but if the file is a web path, it will download it to a temporary file, and use that, then clean up the temporary file after completion """ def __init__(self, file_path: str): """Initialize with file path.""" self.file_path = file_path if "~" in self.file_path: self.file_path = os.path.expanduser(self.file_path) # If the file is a web path, download it to a temporary file, and use that if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path): r = requests.get(self.file_path) if r.status_code != 200: raise ValueError( "Check the url of your file; returned status code %s" % r.status_code ) self.web_path = self.file_path self.temp_file = tempfile.NamedTemporaryFile() self.temp_file.write(r.content) self.file_path = self.temp_file.name elif not os.path.isfile(self.file_path): raise ValueError("File path %s is not a valid file or url" % self.file_path) def __del__(self) -> None: if hasattr(self, "temp_file"): self.temp_file.close()
if hasattr(self, "temp_file"): self.temp_file.close() [docs] def load(self) -> List[Document]: """Load given path as single page.""" import docx2txt return [ Document( page_content=docx2txt.process(self.file_path), metadata={"source": self.file_path}, ) ] @staticmethod def _is_valid_url(url: str) -> bool: """Check if the url is valid.""" parsed = urlparse(url) return bool(parsed.netloc) and bool(parsed.scheme) [docs]class UnstructuredWordDocumentLoader(UnstructuredFileLoader): """Loader that uses unstructured to load word documents.""" def _get_elements(self) -> List: from unstructured.__version__ import __version__ as __unstructured_version__ from unstructured.file_utils.filetype import FileType, detect_filetype unstructured_version = tuple( [int(x) for x in __unstructured_version__.split(".")] ) # NOTE(MthwRobinson) - magic will raise an import error if the libmagic # system dependency isn't installed. If it's not installed, we'll just # check the file extension try: import magic # noqa: F401 is_doc = detect_filetype(self.file_path) == FileType.DOC except ImportError: _, extension = os.path.splitext(str(self.file_path)) is_doc = extension == ".doc" if is_doc and unstructured_version < (0, 4, 11): raise ValueError( f"You are on unstructured version {__unstructured_version__}. "
f"You are on unstructured version {__unstructured_version__}. " "Partitioning .doc files is only supported in unstructured>=0.4.11. " "Please upgrade the unstructured package and try again." ) if is_doc: from unstructured.partition.doc import partition_doc return partition_doc(filename=self.file_path, **self.unstructured_kwargs) else: from unstructured.partition.docx import partition_docx return partition_docx(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/word_document.html
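A usage sketch for the Word loaders; the file path (or URL, for Docx2txtLoader) is a placeholder:

.. code-block:: python

    from langchain.document_loaders.word_document import (
        Docx2txtLoader,
        UnstructuredWordDocumentLoader,
    )

    docs = Docx2txtLoader("report.docx").load()                      # placeholder path or URL
    elements = UnstructuredWordDocumentLoader("report.docx").load()  # placeholder path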
Source code for langchain.document_loaders.html_bs """Loader that uses bs4 to load HTML files, enriching metadata with page title.""" import logging from typing import Dict, List, Union from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader logger = logging.getLogger(__name__) [docs]class BSHTMLLoader(BaseLoader): """Loader that uses beautiful soup to parse HTML files.""" def __init__( self, file_path: str, open_encoding: Union[str, None] = None, bs_kwargs: Union[dict, None] = None, get_text_separator: str = "", ) -> None: """Initialise with path, and optionally, file encoding to use, and any kwargs to pass to the BeautifulSoup object.""" try: import bs4 # noqa:F401 except ImportError: raise ValueError( "beautifulsoup4 package not found, please install it with " "`pip install beautifulsoup4`" ) self.file_path = file_path self.open_encoding = open_encoding if bs_kwargs is None: bs_kwargs = {"features": "lxml"} self.bs_kwargs = bs_kwargs self.get_text_separator = get_text_separator [docs] def load(self) -> List[Document]: from bs4 import BeautifulSoup """Load HTML document into document objects.""" with open(self.file_path, "r", encoding=self.open_encoding) as f: soup = BeautifulSoup(f, **self.bs_kwargs) text = soup.get_text(self.get_text_separator) if soup.title: title = str(soup.title.string) else: title = "" metadata: Dict[str, Union[str, None]] = {
title = "" metadata: Dict[str, Union[str, None]] = { "source": self.file_path, "title": title, } return [Document(page_content=text, metadata=metadata)]
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/html_bs.html
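A usage sketch for BSHTMLLoader (requires beautifulsoup4, plus lxml for the default parser); the file path is a placeholder:

.. code-block:: python

    from langchain.document_loaders.html_bs import BSHTMLLoader

    loader = BSHTMLLoader("page.html", open_encoding="utf-8", get_text_separator="\n")  # placeholder path
    docs = loader.load()
    print(docs[0].metadata["title"])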
Source code for langchain.document_loaders.stripe """Loader that fetches data from Stripe""" import json import urllib.request from typing import List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utils import get_from_env, stringify_dict STRIPE_ENDPOINTS = { "balance_transactions": "https://api.stripe.com/v1/balance_transactions", "charges": "https://api.stripe.com/v1/charges", "customers": "https://api.stripe.com/v1/customers", "events": "https://api.stripe.com/v1/events", "refunds": "https://api.stripe.com/v1/refunds", "disputes": "https://api.stripe.com/v1/disputes", } [docs]class StripeLoader(BaseLoader): """Loader that fetches data from Stripe.""" def __init__(self, resource: str, access_token: Optional[str] = None) -> None: self.resource = resource access_token = access_token or get_from_env( "access_token", "STRIPE_ACCESS_TOKEN" ) self.headers = {"Authorization": f"Bearer {access_token}"} def _make_request(self, url: str) -> List[Document]: request = urllib.request.Request(url, headers=self.headers) with urllib.request.urlopen(request) as response: json_data = json.loads(response.read().decode()) text = stringify_dict(json_data) metadata = {"source": url} return [Document(page_content=text, metadata=metadata)] def _get_resource(self) -> List[Document]: endpoint = STRIPE_ENDPOINTS.get(self.resource) if endpoint is None: return []
return self._make_request(endpoint) [docs] def load(self) -> List[Document]: return self._get_resource()
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/stripe.html
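A usage sketch for StripeLoader; the API key is a placeholder and can also be supplied via the STRIPE_ACCESS_TOKEN environment variable:

.. code-block:: python

    from langchain.document_loaders.stripe import StripeLoader

    loader = StripeLoader("charges", access_token="sk_test_YOUR_KEY")  # placeholder key
    docs = loader.load()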
Source code for langchain.document_loaders.mastodon """Mastodon document loader.""" from __future__ import annotations import os from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader if TYPE_CHECKING: import mastodon def _dependable_mastodon_import() -> mastodon: try: import mastodon except ImportError: raise ValueError( "Mastodon.py package not found, " "please install it with `pip install Mastodon.py`" ) return mastodon [docs]class MastodonTootsLoader(BaseLoader): """Mastodon toots loader.""" def __init__( self, mastodon_accounts: Sequence[str], number_toots: Optional[int] = 100, exclude_replies: bool = False, access_token: Optional[str] = None, api_base_url: str = "https://mastodon.social", ): """Instantiate Mastodon toots loader. Args: mastodon_accounts: The list of Mastodon accounts to query. number_toots: How many toots to pull for each account. exclude_replies: Whether to exclude reply toots from the load. access_token: An access token if toots are loaded as a Mastodon app. Can also be specified via the environment variables "MASTODON_ACCESS_TOKEN". api_base_url: A Mastodon API base URL to talk to, if not using the default. """ mastodon = _dependable_mastodon_import() access_token = access_token or os.environ.get("MASTODON_ACCESS_TOKEN")
access_token = access_token or os.environ.get("MASTODON_ACCESS_TOKEN") self.api = mastodon.Mastodon( access_token=access_token, api_base_url=api_base_url ) self.mastodon_accounts = mastodon_accounts self.number_toots = number_toots self.exclude_replies = exclude_replies [docs] def load(self) -> List[Document]: """Load toots into documents.""" results: List[Document] = [] for account in self.mastodon_accounts: user = self.api.account_lookup(account) toots = self.api.account_statuses( user.id, only_media=False, pinned=False, exclude_replies=self.exclude_replies, exclude_reblogs=True, limit=self.number_toots, ) docs = self._format_toots(toots, user) results.extend(docs) return results def _format_toots( self, toots: List[Dict[str, Any]], user_info: dict ) -> Iterable[Document]: """Format toots into documents. Adding user info, and selected toot fields into the metadata. """ for toot in toots: metadata = { "created_at": toot["created_at"], "user_info": user_info, "is_reply": toot["in_reply_to_id"] is not None, } yield Document( page_content=toot["content"], metadata=metadata, )
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/mastodon.html
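A usage sketch for MastodonTootsLoader; the account handle is a placeholder, and an access token is only needed when loading as a Mastodon app:

.. code-block:: python

    from langchain.document_loaders.mastodon import MastodonTootsLoader

    loader = MastodonTootsLoader(
        mastodon_accounts=["@Gargron@mastodon.social"],  # placeholder account
        number_toots=50,
        exclude_replies=True,
    )
    docs = loader.load()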
Source code for langchain.document_loaders.discord """Load from Discord chat dump""" from __future__ import annotations from typing import TYPE_CHECKING, List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader if TYPE_CHECKING: import pandas as pd [docs]class DiscordChatLoader(BaseLoader): """Load Discord chat logs.""" def __init__(self, chat_log: pd.DataFrame, user_id_col: str = "ID"): """Initialize with a Pandas DataFrame containing chat logs.""" if not isinstance(chat_log, pd.DataFrame): raise ValueError( f"Expected chat_log to be a pd.DataFrame, got {type(chat_log)}" ) self.chat_log = chat_log self.user_id_col = user_id_col [docs] def load(self) -> List[Document]: """Load all chat messages.""" result = [] for _, row in self.chat_log.iterrows(): user_id = row[self.user_id_col] metadata = row.to_dict() metadata.pop(self.user_id_col) result.append(Document(page_content=user_id, metadata=metadata)) return result
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/discord.html
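A usage sketch for DiscordChatLoader with a hand-built DataFrame standing in for a real chat export:

.. code-block:: python

    import pandas as pd

    from langchain.document_loaders.discord import DiscordChatLoader

    chat_log = pd.DataFrame(
        {"ID": ["user_1", "user_2"], "Content": ["hello", "hi there"]}  # placeholder export
    )
    docs = DiscordChatLoader(chat_log, user_id_col="ID").load()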
Source code for langchain.document_loaders.merge from typing import Iterator, List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class MergedDataLoader(BaseLoader): """Merge documents from a list of loaders""" def __init__(self, loaders: List): """Initialize with a list of loaders""" self.loaders = loaders [docs] def lazy_load(self) -> Iterator[Document]: """Lazy load docs from each individual loader.""" for loader in self.loaders: # Check if lazy_load is implemented try: data = loader.lazy_load() except NotImplementedError: data = loader.load() for document in data: yield document [docs] def load(self) -> List[Document]: """Load docs.""" return list(self.lazy_load())
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/merge.html
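A usage sketch for MergedDataLoader combining two other loaders from this module set; the path and URL are placeholders:

.. code-block:: python

    from langchain.document_loaders.gutenberg import GutenbergLoader
    from langchain.document_loaders.merge import MergedDataLoader
    from langchain.document_loaders.text import TextLoader

    loader = MergedDataLoader(
        loaders=[
            TextLoader("notes.txt"),  # placeholder path
            GutenbergLoader("https://www.gutenberg.org/cache/epub/1342/pg1342.txt"),  # placeholder URL
        ]
    )
    docs = loader.load()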
Source code for langchain.document_loaders.s3_file """Loading logic for loading documents from an s3 file.""" import os import tempfile from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.unstructured import UnstructuredFileLoader [docs]class S3FileLoader(BaseLoader): """Loading logic for loading documents from s3.""" def __init__(self, bucket: str, key: str): """Initialize with bucket and key name.""" self.bucket = bucket self.key = key [docs] def load(self) -> List[Document]: """Load documents.""" try: import boto3 except ImportError: raise ImportError( "Could not import `boto3` python package. " "Please install it with `pip install boto3`." ) s3 = boto3.client("s3") with tempfile.TemporaryDirectory() as temp_dir: file_path = f"{temp_dir}/{self.key}" os.makedirs(os.path.dirname(file_path), exist_ok=True) s3.download_file(self.bucket, self.key, file_path) loader = UnstructuredFileLoader(file_path) return loader.load()
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/s3_file.html
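A usage sketch for S3FileLoader; bucket and key are placeholders, and boto3 resolves credentials from the environment or AWS config:

.. code-block:: python

    from langchain.document_loaders.s3_file import S3FileLoader

    loader = S3FileLoader(bucket="my-bucket", key="reports/q1.pdf")  # placeholder bucket/key
    docs = loader.load()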
Source code for langchain.document_loaders.epub """Loader that loads EPub files.""" from typing import List from langchain.document_loaders.unstructured import ( UnstructuredFileLoader, satisfies_min_unstructured_version, ) [docs]class UnstructuredEPubLoader(UnstructuredFileLoader): """Loader that uses unstructured to load epub files.""" def _get_elements(self) -> List: min_unstructured_version = "0.5.4" if not satisfies_min_unstructured_version(min_unstructured_version): raise ValueError( "Partitioning epub files is only supported in " f"unstructured>={min_unstructured_version}." ) from unstructured.partition.epub import partition_epub return partition_epub(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/epub.html
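A usage sketch for UnstructuredEPubLoader (requires unstructured>=0.5.4 as checked above); the file path is a placeholder:

.. code-block:: python

    from langchain.document_loaders.epub import UnstructuredEPubLoader

    loader = UnstructuredEPubLoader("book.epub", mode="elements")  # placeholder path
    docs = loader.load()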
Source code for langchain.document_loaders.duckdb_loader from typing import Dict, List, Optional, cast from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class DuckDBLoader(BaseLoader): """Loads a query result from DuckDB into a list of documents. Each document represents one row of the result. The `page_content_columns` are written into the `page_content` of the document. The `metadata_columns` are written into the `metadata` of the document. By default, all columns are written into the `page_content` and none into the `metadata`. """ def __init__( self, query: str, database: str = ":memory:", read_only: bool = False, config: Optional[Dict[str, str]] = None, page_content_columns: Optional[List[str]] = None, metadata_columns: Optional[List[str]] = None, ): self.query = query self.database = database self.read_only = read_only self.config = config or {} self.page_content_columns = page_content_columns self.metadata_columns = metadata_columns [docs] def load(self) -> List[Document]: try: import duckdb except ImportError: raise ImportError( "Could not import duckdb python package. " "Please install it with `pip install duckdb`." ) docs = [] with duckdb.connect( database=self.database, read_only=self.read_only, config=self.config ) as con: query_result = con.execute(self.query) results = query_result.fetchall() description = cast(list, query_result.description)
results = query_result.fetchall() description = cast(list, query_result.description) field_names = [c[0] for c in description] if self.page_content_columns is None: page_content_columns = field_names else: page_content_columns = self.page_content_columns if self.metadata_columns is None: metadata_columns = [] else: metadata_columns = self.metadata_columns for result in results: page_content = "\n".join( f"{column}: {result[field_names.index(column)]}" for column in page_content_columns ) metadata = { column: result[field_names.index(column)] for column in metadata_columns } doc = Document(page_content=page_content, metadata=metadata) docs.append(doc) return docs
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/duckdb_loader.html
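A usage sketch for DuckDBLoader; the query and column names are placeholders for your own data:

.. code-block:: python

    from langchain.document_loaders.duckdb_loader import DuckDBLoader

    loader = DuckDBLoader(
        query="SELECT * FROM read_csv_auto('example.csv')",  # placeholder query
        page_content_columns=["title", "body"],              # placeholder columns
        metadata_columns=["id"],
    )
    docs = loader.load()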
Source code for langchain.document_loaders.notebook """Loader that loads .ipynb notebook files.""" import json from pathlib import Path from typing import Any, List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader def concatenate_cells( cell: dict, include_outputs: bool, max_output_length: int, traceback: bool ) -> str: """Combine cells information in a readable format ready to be used.""" cell_type = cell["cell_type"] source = cell["source"] output = cell["outputs"] if include_outputs and cell_type == "code" and output: if "ename" in output[0].keys(): error_name = output[0]["ename"] error_value = output[0]["evalue"] if traceback: traceback = output[0]["traceback"] return ( f"'{cell_type}' cell: '{source}'\n, gives error '{error_name}'," f" with description '{error_value}'\n" f"and traceback '{traceback}'\n\n" ) else: return ( f"'{cell_type}' cell: '{source}'\n, gives error '{error_name}'," f"with description '{error_value}'\n\n" ) elif output[0]["output_type"] == "stream": output = output[0]["text"] min_output = min(max_output_length, len(output)) return ( f"'{cell_type}' cell: '{source}'\n with " f"output: '{output[:min_output]}'\n\n" ) else: return f"'{cell_type}' cell: '{source}'\n\n"
return f"'{cell_type}' cell: '{source}'\n\n" return "" def remove_newlines(x: Any) -> Any: """Remove recursively newlines, no matter the data structure they are stored in.""" import pandas as pd if isinstance(x, str): return x.replace("\n", "") elif isinstance(x, list): return [remove_newlines(elem) for elem in x] elif isinstance(x, pd.DataFrame): return x.applymap(remove_newlines) else: return x [docs]class NotebookLoader(BaseLoader): """Loader that loads .ipynb notebook files.""" def __init__( self, path: str, include_outputs: bool = False, max_output_length: int = 10, remove_newline: bool = False, traceback: bool = False, ): """Initialize with path.""" self.file_path = path self.include_outputs = include_outputs self.max_output_length = max_output_length self.remove_newline = remove_newline self.traceback = traceback [docs] def load( self, ) -> List[Document]: """Load documents.""" try: import pandas as pd except ImportError: raise ImportError( "pandas is needed for Notebook Loader, " "please install with `pip install pandas`" ) p = Path(self.file_path) with open(p, encoding="utf8") as f: d = json.load(f) data = pd.json_normalize(d["cells"]) filtered_data = data[["cell_type", "source", "outputs"]] if self.remove_newline:
filtered_data = filtered_data.applymap(remove_newlines) text = filtered_data.apply( lambda x: concatenate_cells( x, self.include_outputs, self.max_output_length, self.traceback ), axis=1, ).str.cat(sep=" ") metadata = {"source": str(p)} return [Document(page_content=text, metadata=metadata)]
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/notebook.html
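A usage sketch for NotebookLoader; the notebook path is a placeholder:

.. code-block:: python

    from langchain.document_loaders.notebook import NotebookLoader

    loader = NotebookLoader(
        "analysis.ipynb",      # placeholder path
        include_outputs=True,
        max_output_length=50,
        remove_newline=True,
    )
    docs = loader.load()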
Source code for langchain.document_loaders.odt """Loader that loads Open Office ODT files.""" from typing import Any, List from langchain.document_loaders.unstructured import ( UnstructuredFileLoader, validate_unstructured_version, ) [docs]class UnstructuredODTLoader(UnstructuredFileLoader): """Loader that uses unstructured to load open office ODT files.""" def __init__( self, file_path: str, mode: str = "single", **unstructured_kwargs: Any ): validate_unstructured_version(min_unstructured_version="0.6.3") super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs) def _get_elements(self) -> List: from unstructured.partition.odt import partition_odt return partition_odt(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/odt.html
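A usage sketch for UnstructuredODTLoader (requires unstructured>=0.6.3 as validated above); the file path is a placeholder:

.. code-block:: python

    from langchain.document_loaders.odt import UnstructuredODTLoader

    loader = UnstructuredODTLoader("notes.odt", mode="elements")  # placeholder path
    docs = loader.load()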
Source code for langchain.document_loaders.googledrive """Loader that loads data from Google Drive.""" # Prerequisites: # 1. Create a Google Cloud project # 2. Enable the Google Drive API: # https://console.cloud.google.com/flows/enableapi?apiid=drive.googleapis.com # 3. Authorize credentials for desktop app: # https://developers.google.com/drive/api/quickstart/python#authorize_credentials_for_a_desktop_application # noqa: E501 # 4. For service accounts visit # https://cloud.google.com/iam/docs/service-accounts-create import os from pathlib import Path from typing import Any, Dict, List, Optional, Sequence, Union from pydantic import BaseModel, root_validator, validator from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader SCOPES = ["https://www.googleapis.com/auth/drive.readonly"] [docs]class GoogleDriveLoader(BaseLoader, BaseModel): """Loader that loads Google Docs from Google Drive.""" service_account_key: Path = Path.home() / ".credentials" / "keys.json" credentials_path: Path = Path.home() / ".credentials" / "credentials.json" token_path: Path = Path.home() / ".credentials" / "token.json" folder_id: Optional[str] = None document_ids: Optional[List[str]] = None file_ids: Optional[List[str]] = None recursive: bool = False file_types: Optional[Sequence[str]] = None load_trashed_files: bool = False # NOTE(MthwRobinson) - changing the file_loader_cls to type here currently # results in pydantic validation errors file_loader_cls: Any = None
# results in pydantic validation errors file_loader_cls: Any = None file_loader_kwargs: Dict["str", Any] = {} @root_validator def validate_inputs(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Validate that either folder_id or document_ids is set, but not both.""" if values.get("folder_id") and ( values.get("document_ids") or values.get("file_ids") ): raise ValueError( "Cannot specify both folder_id and document_ids nor " "folder_id and file_ids" ) if ( not values.get("folder_id") and not values.get("document_ids") and not values.get("file_ids") ): raise ValueError("Must specify either folder_id, document_ids, or file_ids") file_types = values.get("file_types") if file_types: if values.get("document_ids") or values.get("file_ids"): raise ValueError( "file_types can only be given when folder_id is given," " (not when document_ids or file_ids are given)." ) type_mapping = { "document": "application/vnd.google-apps.document", "sheet": "application/vnd.google-apps.spreadsheet", "pdf": "application/pdf", } allowed_types = list(type_mapping.keys()) + list(type_mapping.values()) short_names = ", ".join([f"'{x}'" for x in type_mapping.keys()]) full_names = ", ".join([f"'{x}'" for x in type_mapping.values()]) for file_type in file_types: if file_type not in allowed_types: raise ValueError(
if file_type not in allowed_types: raise ValueError( f"Given file type {file_type} is not supported. " f"Supported values are: {short_names}; and " f"their full-form names: {full_names}" ) # replace short-form file types by full-form file types def full_form(x: str) -> str: return type_mapping[x] if x in type_mapping else x values["file_types"] = [full_form(file_type) for file_type in file_types] return values @validator("credentials_path") def validate_credentials_path(cls, v: Any, **kwargs: Any) -> Any: """Validate that credentials_path exists.""" if not v.exists(): raise ValueError(f"credentials_path {v} does not exist") return v def _load_credentials(self) -> Any: """Load credentials.""" # Adapted from https://developers.google.com/drive/api/v3/quickstart/python try: from google.auth import default from google.auth.transport.requests import Request from google.oauth2 import service_account from google.oauth2.credentials import Credentials from google_auth_oauthlib.flow import InstalledAppFlow except ImportError: raise ImportError( "You must run " "`pip install --upgrade " "google-api-python-client google-auth-httplib2 " "google-auth-oauthlib` " "to use the Google Drive loader." ) creds = None if self.service_account_key.exists(): return service_account.Credentials.from_service_account_file( str(self.service_account_key), scopes=SCOPES ) if self.token_path.exists():
) if self.token_path.exists(): creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES) if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) elif "GOOGLE_APPLICATION_CREDENTIALS" not in os.environ: creds, project = default() creds = creds.with_scopes(SCOPES) # no need to write to file if creds: return creds else: flow = InstalledAppFlow.from_client_secrets_file( str(self.credentials_path), SCOPES ) creds = flow.run_local_server(port=0) with open(self.token_path, "w") as token: token.write(creds.to_json()) return creds def _load_sheet_from_id(self, id: str) -> List[Document]: """Load a sheet and all tabs from an ID.""" from googleapiclient.discovery import build creds = self._load_credentials() sheets_service = build("sheets", "v4", credentials=creds) spreadsheet = sheets_service.spreadsheets().get(spreadsheetId=id).execute() sheets = spreadsheet.get("sheets", []) documents = [] for sheet in sheets: sheet_name = sheet["properties"]["title"] result = ( sheets_service.spreadsheets() .values() .get(spreadsheetId=id, range=sheet_name) .execute() ) values = result.get("values", []) header = values[0] for i, row in enumerate(values[1:], start=1): metadata = { "source": (
metadata = { "source": ( f"https://docs.google.com/spreadsheets/d/{id}/" f"edit?gid={sheet['properties']['sheetId']}" ), "title": f"{spreadsheet['properties']['title']} - {sheet_name}", "row": i, } content = [] for j, v in enumerate(row): title = header[j].strip() if len(header) > j else "" content.append(f"{title}: {v.strip()}") page_content = "\n".join(content) documents.append(Document(page_content=page_content, metadata=metadata)) return documents def _load_document_from_id(self, id: str) -> Document: """Load a document from an ID.""" from io import BytesIO from googleapiclient.discovery import build from googleapiclient.errors import HttpError from googleapiclient.http import MediaIoBaseDownload creds = self._load_credentials() service = build("drive", "v3", credentials=creds) file = service.files().get(fileId=id, supportsAllDrives=True).execute() request = service.files().export_media(fileId=id, mimeType="text/plain") fh = BytesIO() downloader = MediaIoBaseDownload(fh, request) done = False try: while done is False: status, done = downloader.next_chunk() except HttpError as e: if e.resp.status == 404: print("File not found: {}".format(id)) else: print("An error occurred: {}".format(e)) text = fh.getvalue().decode("utf-8") metadata = {
text = fh.getvalue().decode("utf-8") metadata = { "source": f"https://docs.google.com/document/d/{id}/edit", "title": f"{file.get('name')}", } return Document(page_content=text, metadata=metadata) def _load_documents_from_folder( self, folder_id: str, *, file_types: Optional[Sequence[str]] = None ) -> List[Document]: """Load documents from a folder.""" from googleapiclient.discovery import build creds = self._load_credentials() service = build("drive", "v3", credentials=creds) files = self._fetch_files_recursive(service, folder_id) # If file types filter is provided, we'll filter by the file type. if file_types: _files = [f for f in files if f["mimeType"] in file_types] # type: ignore else: _files = files returns = [] for file in _files: if file["trashed"] and not self.load_trashed_files: continue elif file["mimeType"] == "application/vnd.google-apps.document": returns.append(self._load_document_from_id(file["id"])) # type: ignore elif file["mimeType"] == "application/vnd.google-apps.spreadsheet": returns.extend(self._load_sheet_from_id(file["id"])) # type: ignore elif ( file["mimeType"] == "application/pdf" or self.file_loader_cls is not None ): returns.extend(self._load_file_from_id(file["id"])) # type: ignore else: pass return returns def _fetch_files_recursive(
else: pass return returns def _fetch_files_recursive( self, service: Any, folder_id: str ) -> List[Dict[str, Union[str, List[str]]]]: """Fetch all files and subfolders recursively.""" results = ( service.files() .list( q=f"'{folder_id}' in parents", pageSize=1000, includeItemsFromAllDrives=True, supportsAllDrives=True, fields="nextPageToken, files(id, name, mimeType, parents, trashed)", ) .execute() ) files = results.get("files", []) returns = [] for file in files: if file["mimeType"] == "application/vnd.google-apps.folder": if self.recursive: returns.extend(self._fetch_files_recursive(service, file["id"])) else: returns.append(file) return returns def _load_documents_from_ids(self) -> List[Document]: """Load documents from a list of IDs.""" if not self.document_ids: raise ValueError("document_ids must be set") return [self._load_document_from_id(doc_id) for doc_id in self.document_ids] def _load_file_from_id(self, id: str) -> List[Document]: """Load a file from an ID.""" from io import BytesIO from googleapiclient.discovery import build from googleapiclient.http import MediaIoBaseDownload creds = self._load_credentials() service = build("drive", "v3", credentials=creds) file = service.files().get(fileId=id, supportsAllDrives=True).execute()
file = service.files().get(fileId=id, supportsAllDrives=True).execute() request = service.files().get_media(fileId=id) fh = BytesIO() downloader = MediaIoBaseDownload(fh, request) done = False while done is False: status, done = downloader.next_chunk() if self.file_loader_cls is not None: fh.seek(0) loader = self.file_loader_cls(file=fh, **self.file_loader_kwargs) docs = loader.load() for doc in docs: doc.metadata["source"] = f"https://drive.google.com/file/d/{id}/view" return docs else: from PyPDF2 import PdfReader content = fh.getvalue() pdf_reader = PdfReader(BytesIO(content)) return [ Document( page_content=page.extract_text(), metadata={ "source": f"https://drive.google.com/file/d/{id}/view", "title": f"{file.get('name')}", "page": i, }, ) for i, page in enumerate(pdf_reader.pages) ] def _load_file_from_ids(self) -> List[Document]: """Load files from a list of IDs.""" if not self.file_ids: raise ValueError("file_ids must be set") docs = [] for file_id in self.file_ids: docs.extend(self._load_file_from_id(file_id)) return docs [docs] def load(self) -> List[Document]: """Load documents.""" if self.folder_id: return self._load_documents_from_folder( self.folder_id, file_types=self.file_types ) elif self.document_ids:
return self._load_documents_from_ids() else: return self._load_file_from_ids()
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/googledrive.html
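A usage sketch for GoogleDriveLoader, assuming credentials already exist at the default ~/.credentials paths; the folder id is a placeholder:

.. code-block:: python

    from langchain.document_loaders.googledrive import GoogleDriveLoader

    loader = GoogleDriveLoader(
        folder_id="1A2B3C4D5E6F",                 # placeholder folder id
        file_types=["document", "sheet", "pdf"],  # short names are expanded by the validator
        recursive=False,
    )
    docs = loader.load()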
Source code for langchain.document_loaders.azure_blob_storage_file """Loading logic for loading documents from an Azure Blob Storage file.""" import os import tempfile from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.unstructured import UnstructuredFileLoader [docs]class AzureBlobStorageFileLoader(BaseLoader): """Loading logic for loading documents from Azure Blob Storage.""" def __init__(self, conn_str: str, container: str, blob_name: str): """Initialize with connection string, container and blob name.""" self.conn_str = conn_str self.container = container self.blob = blob_name [docs] def load(self) -> List[Document]: """Load documents.""" try: from azure.storage.blob import BlobClient except ImportError as exc: raise ValueError( "Could not import azure storage blob python package. " "Please install it with `pip install azure-storage-blob`." ) from exc client = BlobClient.from_connection_string( conn_str=self.conn_str, container_name=self.container, blob_name=self.blob ) with tempfile.TemporaryDirectory() as temp_dir: file_path = f"{temp_dir}/{self.container}/{self.blob}" os.makedirs(os.path.dirname(file_path), exist_ok=True) with open(f"{file_path}", "wb") as file: blob_data = client.download_blob() blob_data.readinto(file) loader = UnstructuredFileLoader(file_path) return loader.load()
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/azure_blob_storage_file.html
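A usage sketch for AzureBlobStorageFileLoader; the connection string, container, and blob name are placeholders:

.. code-block:: python

    from langchain.document_loaders.azure_blob_storage_file import AzureBlobStorageFileLoader

    loader = AzureBlobStorageFileLoader(
        conn_str="DefaultEndpointsProtocol=https;AccountName=ACCOUNT;AccountKey=KEY",  # placeholder
        container="docs",
        blob_name="handbook.pdf",
    )
    docs = loader.load()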
Source code for langchain.document_loaders.twitter """Twitter document loader.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence, Union from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader if TYPE_CHECKING: import tweepy from tweepy import OAuth2BearerHandler, OAuthHandler def _dependable_tweepy_import() -> tweepy: try: import tweepy except ImportError: raise ImportError( "tweepy package not found, please install it with `pip install tweepy`" ) return tweepy [docs]class TwitterTweetLoader(BaseLoader): """Twitter tweets loader. Read tweets of user twitter handle. First you need to go to `https://developer.twitter.com/en/docs/twitter-api /getting-started/getting-access-to-the-twitter-api` to get your token. And create a v2 version of the app. """ def __init__( self, auth_handler: Union[OAuthHandler, OAuth2BearerHandler], twitter_users: Sequence[str], number_tweets: Optional[int] = 100, ): self.auth = auth_handler self.twitter_users = twitter_users self.number_tweets = number_tweets [docs] def load(self) -> List[Document]: """Load tweets.""" tweepy = _dependable_tweepy_import() api = tweepy.API(self.auth, parser=tweepy.parsers.JSONParser()) results: List[Document] = [] for username in self.twitter_users: tweets = api.user_timeline(screen_name=username, count=self.number_tweets) user = api.get_user(screen_name=username)
user = api.get_user(screen_name=username) docs = self._format_tweets(tweets, user) results.extend(docs) return results def _format_tweets( self, tweets: List[Dict[str, Any]], user_info: dict ) -> Iterable[Document]: """Format tweets into a string.""" for tweet in tweets: metadata = { "created_at": tweet["created_at"], "user_info": user_info, } yield Document( page_content=tweet["text"], metadata=metadata, ) [docs] @classmethod def from_bearer_token( cls, oauth2_bearer_token: str, twitter_users: Sequence[str], number_tweets: Optional[int] = 100, ) -> TwitterTweetLoader: """Create a TwitterTweetLoader from OAuth2 bearer token.""" tweepy = _dependable_tweepy_import() auth = tweepy.OAuth2BearerHandler(oauth2_bearer_token) return cls( auth_handler=auth, twitter_users=twitter_users, number_tweets=number_tweets, ) [docs] @classmethod def from_secrets( cls, access_token: str, access_token_secret: str, consumer_key: str, consumer_secret: str, twitter_users: Sequence[str], number_tweets: Optional[int] = 100, ) -> TwitterTweetLoader: """Create a TwitterTweetLoader from access tokens and secrets.""" tweepy = _dependable_tweepy_import() auth = tweepy.OAuthHandler( access_token=access_token, access_token_secret=access_token_secret,
consumer_key=consumer_key, consumer_secret=consumer_secret, ) return cls( auth_handler=auth, twitter_users=twitter_users, number_tweets=number_tweets, )
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/twitter.html
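A usage sketch for TwitterTweetLoader using the bearer-token constructor; the token and handle are placeholders:

.. code-block:: python

    from langchain.document_loaders.twitter import TwitterTweetLoader

    loader = TwitterTweetLoader.from_bearer_token(
        oauth2_bearer_token="YOUR_BEARER_TOKEN",  # placeholder token
        twitter_users=["hwchase17"],              # placeholder handle
        number_tweets=25,
    )
    docs = loader.load()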
abaecc45eab6-0
Source code for langchain.document_loaders.hn

"""Loader that loads HN."""
from typing import Any, List

from langchain.docstore.document import Document
from langchain.document_loaders.web_base import WebBaseLoader


class HNLoader(WebBaseLoader):
    """Load Hacker News data from either main page results or the comments page."""

    def load(self) -> List[Document]:
        """Get important HN webpage information.

        Components are:
            - title
            - content
            - source url
            - time of post
            - author of the post
            - number of comments
            - rank of the post
        """
        soup_info = self.scrape()
        if "item" in self.web_path:
            return self.load_comments(soup_info)
        else:
            return self.load_results(soup_info)

    def load_comments(self, soup_info: Any) -> List[Document]:
        """Load comments from a HN post."""
        comments = soup_info.select("tr[class='athing comtr']")
        title = soup_info.select_one("tr[id='pagespace']").get("title")
        return [
            Document(
                page_content=comment.text.strip(),
                metadata={"source": self.web_path, "title": title},
            )
            for comment in comments
        ]

    def load_results(self, soup: Any) -> List[Document]:
        """Load items from an HN page."""
        items = soup.select("tr[class='athing']")
        documents = []
        for lineItem in items:
            ranking = lineItem.select_one("span[class='rank']").text
            link = lineItem.find("span", {"class": "titleline"}).find("a").get("href")
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/hn.html
abaecc45eab6-1
            title = lineItem.find("span", {"class": "titleline"}).text.strip()
            metadata = {
                "source": self.web_path,
                "title": title,
                "link": link,
                "ranking": ranking,
            }
            documents.append(
                Document(
                    page_content=title, link=link, ranking=ranking, metadata=metadata
                )
            )
        return documents
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/hn.html
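A short usage sketch for HNLoader. The item URL is illustrative: a front-page URL produces one Document per story, while an "item?id=..." URL produces one Document per comment, following the branch in load() above.

from langchain.document_loaders.hn import HNLoader

# An "item" URL triggers load_comments(); the front page would trigger load_results().
loader = HNLoader("https://news.ycombinator.com/item?id=34817881")
comments = loader.load()
print(len(comments), comments[0].metadata["title"])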
c5fbe0070dfc-0
Source code for langchain.document_loaders.gitbook

"""Loader that loads GitBook."""
from typing import Any, List, Optional
from urllib.parse import urljoin, urlparse

from langchain.docstore.document import Document
from langchain.document_loaders.web_base import WebBaseLoader


class GitbookLoader(WebBaseLoader):
    """Load GitBook data.

    1. load from either a single page, or
    2. load all (relative) paths in the navbar.
    """

    def __init__(
        self,
        web_page: str,
        load_all_paths: bool = False,
        base_url: Optional[str] = None,
        content_selector: str = "main",
    ):
        """Initialize with web page and whether to load all paths.

        Args:
            web_page: The web page to load or the starting point from where
                relative paths are discovered.
            load_all_paths: If set to True, all relative paths in the navbar
                are loaded instead of only `web_page`.
            base_url: If `load_all_paths` is True, the relative paths are
                appended to this base url. Defaults to `web_page` if not set.
        """
        self.base_url = base_url or web_page
        if self.base_url.endswith("/"):
            self.base_url = self.base_url[:-1]
        if load_all_paths:
            # set web_path to the sitemap if we want to crawl all paths
            web_paths = f"{self.base_url}/sitemap.xml"
        else:
            web_paths = web_page
        super().__init__(web_paths)
        self.load_all_paths = load_all_paths
        self.content_selector = content_selector

    def load(self) -> List[Document]:
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/gitbook.html
c5fbe0070dfc-1
        """Fetch text from one single GitBook page."""
        if self.load_all_paths:
            soup_info = self.scrape()
            relative_paths = self._get_paths(soup_info)
            documents = []
            for path in relative_paths:
                url = urljoin(self.base_url, path)
                print(f"Fetching text from {url}")
                soup_info = self._scrape(url)
                documents.append(self._get_document(soup_info, url))
            return [d for d in documents if d]
        else:
            soup_info = self.scrape()
            documents = [self._get_document(soup_info, self.web_path)]
            return [d for d in documents if d]

    def _get_document(
        self, soup: Any, custom_url: Optional[str] = None
    ) -> Optional[Document]:
        """Fetch content from page and return Document."""
        page_content_raw = soup.find(self.content_selector)
        if not page_content_raw:
            return None
        content = page_content_raw.get_text(separator="\n").strip()
        title_if_exists = page_content_raw.find("h1")
        title = title_if_exists.text if title_if_exists else ""
        metadata = {"source": custom_url or self.web_path, "title": title}
        return Document(page_content=content, metadata=metadata)

    def _get_paths(self, soup: Any) -> List[str]:
        """Fetch all relative paths in the navbar."""
        return [urlparse(loc.text).path for loc in soup.find_all("loc")]
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/gitbook.html
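A usage sketch for GitbookLoader, assuming a hypothetical GitBook site that publishes a sitemap.xml; with load_all_paths=True the loader scrapes every path listed there rather than only the starting page.

from langchain.document_loaders.gitbook import GitbookLoader

# docs.example.com is a placeholder domain; any GitBook-hosted site with a sitemap works.
loader = GitbookLoader("https://docs.example.com", load_all_paths=True)
pages = loader.load()
for page in pages[:3]:
    print(page.metadata["title"], "->", page.metadata["source"])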
a5c3c90f18d6-0
Source code for langchain.document_loaders.college_confidential

"""Loader that loads College Confidential."""
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.web_base import WebBaseLoader


class CollegeConfidentialLoader(WebBaseLoader):
    """Loader that loads College Confidential webpages."""

    def load(self) -> List[Document]:
        """Load webpage."""
        soup = self.scrape()
        text = soup.select_one("main[class='skin-handler']").text
        metadata = {"source": self.web_path}
        return [Document(page_content=text, metadata=metadata)]
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/college_confidential.html
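A brief sketch of how the loader above might be called. The URL is illustrative, and the page is expected to expose its content under main[class='skin-handler'], as the selector in load() assumes.

from langchain.document_loaders.college_confidential import CollegeConfidentialLoader

# Placeholder article URL -- any College Confidential page with the expected layout.
loader = CollegeConfidentialLoader(
    "https://www.collegeconfidential.com/colleges/brown-university/"
)
docs = loader.load()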
b932e0098b30-0
Source code for langchain.document_loaders.powerpoint

"""Loader that loads powerpoint files."""
import os
from typing import List

from langchain.document_loaders.unstructured import UnstructuredFileLoader


class UnstructuredPowerPointLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load powerpoint files."""

    def _get_elements(self) -> List:
        from unstructured.__version__ import __version__ as __unstructured_version__
        from unstructured.file_utils.filetype import FileType, detect_filetype

        unstructured_version = tuple(
            [int(x) for x in __unstructured_version__.split(".")]
        )
        # NOTE(MthwRobinson) - magic will raise an import error if the libmagic
        # system dependency isn't installed. If it's not installed, we'll just
        # check the file extension
        try:
            import magic  # noqa: F401

            is_ppt = detect_filetype(self.file_path) == FileType.PPT
        except ImportError:
            _, extension = os.path.splitext(str(self.file_path))
            is_ppt = extension == ".ppt"

        if is_ppt and unstructured_version < (0, 4, 11):
            raise ValueError(
                f"You are on unstructured version {__unstructured_version__}. "
                "Partitioning .ppt files is only supported in unstructured>=0.4.11. "
                "Please upgrade the unstructured package and try again."
            )

        if is_ppt:
            from unstructured.partition.ppt import partition_ppt

            return partition_ppt(filename=self.file_path, **self.unstructured_kwargs)
        else:
            from unstructured.partition.pptx import partition_pptx

            return partition_pptx(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/powerpoint.html
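A usage sketch for UnstructuredPowerPointLoader. The file path is a placeholder, and mode="elements" (inherited from UnstructuredFileLoader) keeps each detected element as its own Document instead of concatenating the whole deck.

from langchain.document_loaders.powerpoint import UnstructuredPowerPointLoader

# "slides.pptx" is a placeholder; .ppt files additionally require unstructured>=0.4.11.
loader = UnstructuredPowerPointLoader("slides.pptx", mode="elements")
elements = loader.load()
print(len(elements), elements[0].metadata)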
9a1d2e553f93-0
Source code for langchain.document_loaders.larksuite

"""Loader that loads LarkSuite (FeiShu) document json dump."""
import json
import urllib.request
from typing import Any, Iterator, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class LarkSuiteDocLoader(BaseLoader):
    """Loader that loads LarkSuite (FeiShu) document."""

    def __init__(self, domain: str, access_token: str, document_id: str):
        """Initialize with domain, access_token (tenant / user), and document_id."""
        self.domain = domain
        self.access_token = access_token
        self.document_id = document_id

    def _get_larksuite_api_json_data(self, api_url: str) -> Any:
        """Get LarkSuite (FeiShu) API response json data."""
        headers = {"Authorization": f"Bearer {self.access_token}"}
        request = urllib.request.Request(api_url, headers=headers)
        with urllib.request.urlopen(request) as response:
            json_data = json.loads(response.read().decode())
            return json_data

    def lazy_load(self) -> Iterator[Document]:
        """Lazy load LarkSuite (FeiShu) document."""
        api_url_prefix = f"{self.domain}/open-apis/docx/v1/documents"
        metadata_json = self._get_larksuite_api_json_data(
            f"{api_url_prefix}/{self.document_id}"
        )
        raw_content_json = self._get_larksuite_api_json_data(
            f"{api_url_prefix}/{self.document_id}/raw_content"
        )
        text = raw_content_json["data"]["content"]
        metadata = {
            "document_id": self.document_id,
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/larksuite.html
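A usage sketch for LarkSuiteDocLoader. The domain, access token, and document id are placeholders; because only lazy_load() appears in the excerpt above, the example materialises the iterator explicitly rather than assuming a load() wrapper.

from langchain.document_loaders.larksuite import LarkSuiteDocLoader

# Placeholder tenant domain, token, and document id -- substitute real values.
loader = LarkSuiteDocLoader(
    domain="https://open.feishu.cn",
    access_token="<tenant-or-user-access-token>",
    document_id="<document-id>",
)
docs = list(loader.lazy_load())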