a956ae3c2c32-3
        if self.include_card_name:
            text_content = card.name + "\n"
        if card.description.strip():
            text_content += BeautifulSoup(card.description, "lxml").get_text()
        if self.include_checklist:
            # Get all the checklist items on the card
            for checklist in card.checklists:
                if checklist.items:
                    items = [
                        f"{item['name']}:{item['state']}" for item in checklist.items
                    ]
                    text_content += f"\n{checklist.name}\n" + "\n".join(items)
        if self.include_comments:
            # Get all the comments on the card
            comments = [
                BeautifulSoup(comment["data"]["text"], "lxml").get_text()
                for comment in card.comments
            ]
            text_content += "Comments:" + "\n".join(comments)
        # Default metadata fields
        metadata = {
            "title": card.name,
            "id": card.id,
            "url": card.url,
        }
        # Extra metadata fields. Card object is not subscriptable.
        if "labels" in self.extra_metadata:
            metadata["labels"] = [label.name for label in card.labels]
        if "list" in self.extra_metadata:
            if card.list_id in list_dict:
                metadata["list"] = list_dict[card.list_id]
        if "closed" in self.extra_metadata:
            metadata["closed"] = card.closed
        if "due_date" in self.extra_metadata:
            metadata["due_date"] = card.due_date
        return Document(page_content=text_content, metadata=metadata)
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/trello.html
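The card description and comment bodies above arrive as HTML, which the loader flattens with BeautifulSoup before concatenating. A minimal, self-contained sketch of that flattening step (the HTML string is illustrative, not real Trello output):

    from bs4 import BeautifulSoup  # requires beautifulsoup4 and lxml

    description_html = "<p>Ship the <b>v2</b> loader</p>"  # hypothetical card description
    plain_text = BeautifulSoup(description_html, "lxml").get_text()
    print(plain_text)  # "Ship the v2 loader"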
23c877fca843-0
Source code for langchain.document_loaders.blackboard """Loader that loads all documents from a blackboard course.""" import contextlib import re from pathlib import Path from typing import Any, List, Optional, Tuple from urllib.parse import unquote from langchain.docstore.document import Document from langchain.document_loaders.directory import DirectoryLoader from langchain.document_loaders.pdf import PyPDFLoader from langchain.document_loaders.web_base import WebBaseLoader [docs]class BlackboardLoader(WebBaseLoader): """Loader that loads all documents from a Blackboard course. This loader is not compatible with all Blackboard courses. It is only compatible with courses that use the new Blackboard interface. To use this loader, you must have the BbRouter cookie. You can get this cookie by logging into the course and then copying the value of the BbRouter cookie from the browser's developer tools. Example: .. code-block:: python from langchain.document_loaders import BlackboardLoader loader = BlackboardLoader( blackboard_course_url="https://blackboard.example.com/webapps/blackboard/execute/announcement?method=search&context=course_entry&course_id=_123456_1", bbrouter="expires:12345...", ) documents = loader.load() """ base_url: str folder_path: str load_all_recursively: bool def __init__( self, blackboard_course_url: str, bbrouter: str, load_all_recursively: bool = True, basic_auth: Optional[Tuple[str, str]] = None, cookies: Optional[dict] = None, ): """Initialize with blackboard course url.
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/blackboard.html
23c877fca843-1
): """Initialize with blackboard course url. The BbRouter cookie is required for most blackboard courses. Args: blackboard_course_url: Blackboard course url. bbrouter: BbRouter cookie. load_all_recursively: If True, load all documents recursively. basic_auth: Basic auth credentials. cookies: Cookies. Raises: ValueError: If blackboard course url is invalid. """ super().__init__(blackboard_course_url) # Get base url try: self.base_url = blackboard_course_url.split("/webapps/blackboard")[0] except IndexError: raise ValueError( "Invalid blackboard course url. " "Please provide a url that starts with " "https://<blackboard_url>/webapps/blackboard" ) if basic_auth is not None: self.session.auth = basic_auth # Combine cookies if cookies is None: cookies = {} cookies.update({"BbRouter": bbrouter}) self.session.cookies.update(cookies) self.load_all_recursively = load_all_recursively self.check_bs4() [docs] def check_bs4(self) -> None: """Check if BeautifulSoup4 is installed. Raises: ImportError: If BeautifulSoup4 is not installed. """ try: import bs4 # noqa: F401 except ImportError: raise ImportError( "BeautifulSoup4 is required for BlackboardLoader. " "Please install it with `pip install beautifulsoup4`." ) [docs] def load(self) -> List[Document]: """Load data into document objects. Returns: List of documents.
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/blackboard.html
23c877fca843-2
"""Load data into document objects. Returns: List of documents. """ if self.load_all_recursively: soup_info = self.scrape() self.folder_path = self._get_folder_path(soup_info) relative_paths = self._get_paths(soup_info) documents = [] for path in relative_paths: url = self.base_url + path print(f"Fetching documents from {url}") soup_info = self._scrape(url) with contextlib.suppress(ValueError): documents.extend(self._get_documents(soup_info)) return documents else: print(f"Fetching documents from {self.web_path}") soup_info = self.scrape() self.folder_path = self._get_folder_path(soup_info) return self._get_documents(soup_info) def _get_folder_path(self, soup: Any) -> str: """Get the folder path to save the documents in. Args: soup: BeautifulSoup4 soup object. Returns: Folder path. """ # Get the course name course_name = soup.find("span", {"id": "crumb_1"}) if course_name is None: raise ValueError("No course name found.") course_name = course_name.text.strip() # Prepare the folder path course_name_clean = ( unquote(course_name) .replace(" ", "_") .replace("/", "_") .replace(":", "_") .replace(",", "_") .replace("?", "_") .replace("'", "_") .replace("!", "_") .replace('"', "_") ) # Get the folder path folder_path = Path(".") / course_name_clean
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/blackboard.html
23c877fca843-3
# Get the folder path folder_path = Path(".") / course_name_clean return str(folder_path) def _get_documents(self, soup: Any) -> List[Document]: """Fetch content from page and return Documents. Args: soup: BeautifulSoup4 soup object. Returns: List of documents. """ attachments = self._get_attachments(soup) self._download_attachments(attachments) documents = self._load_documents() return documents def _get_attachments(self, soup: Any) -> List[str]: """Get all attachments from a page. Args: soup: BeautifulSoup4 soup object. Returns: List of attachments. """ from bs4 import BeautifulSoup, Tag # Get content list content_list = soup.find("ul", {"class": "contentList"}) if content_list is None: raise ValueError("No content list found.") content_list: BeautifulSoup # type: ignore # Get all attachments attachments = [] for attachment in content_list.find_all("ul", {"class": "attachments"}): attachment: Tag # type: ignore for link in attachment.find_all("a"): link: Tag # type: ignore href = link.get("href") # Only add if href is not None and does not start with # if href is not None and not href.startswith("#"): attachments.append(href) return attachments def _download_attachments(self, attachments: List[str]) -> None: """Download all attachments. Args: attachments: List of attachments. """ # Make sure the folder exists Path(self.folder_path).mkdir(parents=True, exist_ok=True)
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/blackboard.html
23c877fca843-4
Path(self.folder_path).mkdir(parents=True, exist_ok=True) # Download all attachments for attachment in attachments: self.download(attachment) def _load_documents(self) -> List[Document]: """Load all documents in the folder. Returns: List of documents. """ # Create the document loader loader = DirectoryLoader( path=self.folder_path, glob="*.pdf", loader_cls=PyPDFLoader # type: ignore ) # Load the documents documents = loader.load() # Return all documents return documents def _get_paths(self, soup: Any) -> List[str]: """Get all relative paths in the navbar.""" relative_paths = [] course_menu = soup.find("ul", {"class": "courseMenu"}) if course_menu is None: raise ValueError("No course menu found.") for link in course_menu.find_all("a"): href = link.get("href") if href is not None and href.startswith("/"): relative_paths.append(href) return relative_paths [docs] def download(self, path: str) -> None: """Download a file from a url. Args: path: Path to the file. """ # Get the file content response = self.session.get(self.base_url + path, allow_redirects=True) # Get the filename filename = self.parse_filename(response.url) # Write the file to disk with open(Path(self.folder_path) / filename, "wb") as f: f.write(response.content) [docs] def parse_filename(self, url: str) -> str: """Parse the filename from a url. Args:
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/blackboard.html
23c877fca843-5
"""Parse the filename from a url. Args: url: Url to parse the filename from. Returns: The filename. """ if (url_path := Path(url)) and url_path.suffix == ".pdf": return url_path.name else: return self._parse_filename_from_url(url) def _parse_filename_from_url(self, url: str) -> str: """Parse the filename from a url. Args: url: Url to parse the filename from. Returns: The filename. Raises: ValueError: If the filename could not be parsed. """ filename_matches = re.search(r"filename%2A%3DUTF-8%27%27(.+)", url) if filename_matches: filename = filename_matches.group(1) else: raise ValueError(f"Could not parse filename from {url}") if ".pdf" not in filename: raise ValueError(f"Incorrect file type: {filename}") filename = filename.split(".pdf")[0] + ".pdf" filename = unquote(filename) filename = filename.replace("%20", " ") return filename if __name__ == "__main__": loader = BlackboardLoader( "https://<YOUR BLACKBOARD URL" " HERE>/webapps/blackboard/content/listContent.jsp?course_id=_<YOUR COURSE ID" " HERE>_1&content_id=_<YOUR CONTENT ID HERE>_1&mode=reset", "<YOUR BBROUTER COOKIE HERE>", load_all_recursively=True, ) documents = loader.load() print(f"Loaded {len(documents)} pages of PDFs from {loader.web_path}")
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/blackboard.html
c4cf076174ff-0
Source code for langchain.document_loaders.tencent_cos_file

"""Loader that loads documents from a file stored in Tencent Cloud COS."""
import os
import tempfile
from typing import Any, Iterator, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader


[docs]class TencentCOSFileLoader(BaseLoader):
    """Loader for documents stored in Tencent Cloud COS."""

    def __init__(self, conf: Any, bucket: str, key: str):
        """Initialize with COS config, bucket and key name.

        :param conf(CosConfig): COS config.
        :param bucket(str): COS bucket.
        :param key(str): COS file key.
        """
        self.conf = conf
        self.bucket = bucket
        self.key = key

[docs]    def load(self) -> List[Document]:
        return list(self.lazy_load())

[docs]    def lazy_load(self) -> Iterator[Document]:
        """Load documents."""
        try:
            from qcloud_cos import CosS3Client
        except ImportError:
            raise ValueError(
                "Could not import cos-python-sdk-v5 python package. "
                "Please install it with `pip install cos-python-sdk-v5`."
            )
        # Initialise a client
        client = CosS3Client(self.conf)
        with tempfile.TemporaryDirectory() as temp_dir:
            file_path = f"{temp_dir}/{self.bucket}/{self.key}"
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            # Download the file to a destination
            client.download_file(
                Bucket=self.bucket, Key=self.key, DestFilePath=file_path
            )
            loader = UnstructuredFileLoader(file_path)
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/tencent_cos_file.html
c4cf076174ff-1
            )
            loader = UnstructuredFileLoader(file_path)
            # UnstructuredFileLoader does not implement lazy_load yet
            return iter(loader.load())
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/tencent_cos_file.html
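A minimal usage sketch for the loader above; the region, bucket, key, and credentials are placeholders, and CosConfig comes from the cos-python-sdk-v5 package the loader already depends on:

    from qcloud_cos import CosConfig  # pip install cos-python-sdk-v5
    from langchain.document_loaders.tencent_cos_file import TencentCOSFileLoader

    conf = CosConfig(
        Region="ap-guangzhou",        # placeholder region
        SecretId="YOUR_SECRET_ID",    # placeholder credential
        SecretKey="YOUR_SECRET_KEY",  # placeholder credential
    )
    loader = TencentCOSFileLoader(
        conf=conf,
        bucket="example-bucket-1250000000",  # placeholder bucket
        key="docs/report.pdf",               # placeholder object key
    )
    docs = loader.load()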
1b3cba983cbf-0
Source code for langchain.document_loaders.diffbot

"""Loader that uses Diffbot to load webpages in text format."""
import logging
from typing import Any, List

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)


[docs]class DiffbotLoader(BaseLoader):
    """Loader that loads webpage text via the Diffbot JSON API."""

    def __init__(
        self, api_token: str, urls: List[str], continue_on_failure: bool = True
    ):
        """Initialize with API token and the list of URLs to load."""
        self.api_token = api_token
        self.urls = urls
        self.continue_on_failure = continue_on_failure

    def _diffbot_api_url(self, diffbot_api: str) -> str:
        return f"https://api.diffbot.com/v3/{diffbot_api}"

    def _get_diffbot_data(self, url: str) -> Any:
        """Get Diffbot file from Diffbot REST API."""
        # TODO: Add support for other Diffbot APIs
        diffbot_url = self._diffbot_api_url("article")
        params = {
            "token": self.api_token,
            "url": url,
        }
        response = requests.get(diffbot_url, params=params, timeout=10)
        # TODO: handle non-ok errors
        return response.json() if response.ok else {}

[docs]    def load(self) -> List[Document]:
        """Extract text from Diffbot on all the URLs and return Document instances."""
        docs: List[Document] = list()
        for url in self.urls:
            try:
                data = self._get_diffbot_data(url)
                text = data["objects"][0]["text"] if "objects" in data else ""
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/diffbot.html
1b3cba983cbf-1
text = data["objects"][0]["text"] if "objects" in data else "" metadata = {"source": url} docs.append(Document(page_content=text, metadata=metadata)) except Exception as e: if self.continue_on_failure: logger.error(f"Error fetching or processing {url}, exception: {e}") else: raise e return docs
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/diffbot.html
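A minimal usage sketch, assuming a valid Diffbot API token (the token and URL below are placeholders):

    from langchain.document_loaders import DiffbotLoader

    loader = DiffbotLoader(
        api_token="DIFFBOT_API_TOKEN",             # placeholder token
        urls=["https://www.example.com/article"],  # pages to extract
        continue_on_failure=True,                  # log and skip failures instead of raising
    )
    docs = loader.load()  # one Document per URL, metadata={"source": url}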
ebb2969f6aeb-0
Source code for langchain.document_loaders.notiondb

"""Notion DB loader for langchain"""
from typing import Any, Dict, List, Optional

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

NOTION_BASE_URL = "https://api.notion.com/v1"
DATABASE_URL = NOTION_BASE_URL + "/databases/{database_id}/query"
PAGE_URL = NOTION_BASE_URL + "/pages/{page_id}"
BLOCK_URL = NOTION_BASE_URL + "/blocks/{block_id}/children"


[docs]class NotionDBLoader(BaseLoader):
    """Notion DB Loader.

    Reads content from pages within a Notion database.

    Args:
        integration_token (str): Notion integration token.
        database_id (str): Notion database id.
        request_timeout_sec (int): Timeout for Notion requests in seconds.
    """

    def __init__(
        self,
        integration_token: str,
        database_id: str,
        request_timeout_sec: Optional[int] = 10,
    ) -> None:
        """Initialize with parameters."""
        if not integration_token:
            raise ValueError("integration_token must be provided")
        if not database_id:
            raise ValueError("database_id must be provided")

        self.token = integration_token
        self.database_id = database_id
        self.headers = {
            "Authorization": "Bearer " + self.token,
            "Content-Type": "application/json",
            "Notion-Version": "2022-06-28",
        }
        self.request_timeout_sec = request_timeout_sec

[docs]    def load(self) -> List[Document]:
        """Load documents from the Notion database.

        Returns:
            List[Document]: List of documents.
        """
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/notiondb.html
ebb2969f6aeb-1
Returns: List[Document]: List of documents. """ page_summaries = self._retrieve_page_summaries() return list(self.load_page(page_summary) for page_summary in page_summaries) def _retrieve_page_summaries( self, query_dict: Dict[str, Any] = {"page_size": 100} ) -> List[Dict[str, Any]]: """Get all the pages from a Notion database.""" pages: List[Dict[str, Any]] = [] while True: data = self._request( DATABASE_URL.format(database_id=self.database_id), method="POST", query_dict=query_dict, ) pages.extend(data.get("results")) if not data.get("has_more"): break query_dict["start_cursor"] = data.get("next_cursor") return pages [docs] def load_page(self, page_summary: Dict[str, Any]) -> Document: """Read a page.""" page_id = page_summary["id"] # load properties as metadata metadata: Dict[str, Any] = {} for prop_name, prop_data in page_summary["properties"].items(): prop_type = prop_data["type"] if prop_type == "rich_text": value = ( prop_data["rich_text"][0]["plain_text"] if prop_data["rich_text"] else None ) elif prop_type == "title": value = ( prop_data["title"][0]["plain_text"] if prop_data["title"] else None ) elif prop_type == "multi_select": value = ( [item["name"] for item in prop_data["multi_select"]] if prop_data["multi_select"]
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/notiondb.html
ebb2969f6aeb-2
if prop_data["multi_select"] else [] ) elif prop_type == "url": value = prop_data["url"] else: value = None metadata[prop_name.lower()] = value metadata["id"] = page_id return Document(page_content=self._load_blocks(page_id), metadata=metadata) def _load_blocks(self, block_id: str, num_tabs: int = 0) -> str: """Read a block and its children.""" result_lines_arr: List[str] = [] cur_block_id: str = block_id while cur_block_id: data = self._request(BLOCK_URL.format(block_id=cur_block_id)) for result in data["results"]: result_obj = result[result["type"]] if "rich_text" not in result_obj: continue cur_result_text_arr: List[str] = [] for rich_text in result_obj["rich_text"]: if "text" in rich_text: cur_result_text_arr.append( "\t" * num_tabs + rich_text["text"]["content"] ) if result["has_children"]: children_text = self._load_blocks( result["id"], num_tabs=num_tabs + 1 ) cur_result_text_arr.append(children_text) result_lines_arr.append("\n".join(cur_result_text_arr)) cur_block_id = data.get("next_cursor") return "\n".join(result_lines_arr) def _request( self, url: str, method: str = "GET", query_dict: Dict[str, Any] = {} ) -> Any: res = requests.request( method, url, headers=self.headers, json=query_dict,
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/notiondb.html
ebb2969f6aeb-3
            method,
            url,
            headers=self.headers,
            json=query_dict,
            timeout=self.request_timeout_sec,
        )
        res.raise_for_status()
        return res.json()
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/notiondb.html
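A minimal usage sketch, assuming a Notion integration that has been shared with the target database (the token and database id below are placeholders):

    from langchain.document_loaders import NotionDBLoader

    loader = NotionDBLoader(
        integration_token="secret_xxx",                   # placeholder integration token
        database_id="00000000000000000000000000000000",   # placeholder database id
        request_timeout_sec=30,
    )
    docs = loader.load()  # one Document per page; properties become lowercase metadata keys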
3277bff3e595-0
Source code for langchain.document_loaders.onedrive """Loader that loads data from OneDrive""" from __future__ import annotations import logging import os import tempfile from enum import Enum from pathlib import Path from typing import TYPE_CHECKING, Dict, List, Optional, Type, Union from pydantic import BaseModel, BaseSettings, Field, FilePath, SecretStr from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.onedrive_file import OneDriveFileLoader if TYPE_CHECKING: from O365 import Account from O365.drive import Drive, Folder SCOPES = ["offline_access", "Files.Read.All"] logger = logging.getLogger(__name__) class _OneDriveSettings(BaseSettings): client_id: str = Field(..., env="O365_CLIENT_ID") client_secret: SecretStr = Field(..., env="O365_CLIENT_SECRET") class Config: env_prefix = "" case_sentive = False env_file = ".env" class _OneDriveTokenStorage(BaseSettings): token_path: FilePath = Field(Path.home() / ".credentials" / "o365_token.txt") class _FileType(str, Enum): DOC = "doc" DOCX = "docx" PDF = "pdf" class _SupportedFileTypes(BaseModel): file_types: List[_FileType] def fetch_mime_types(self) -> Dict[str, str]: mime_types_mapping = {} for file_type in self.file_types: if file_type.value == "doc": mime_types_mapping[file_type.value] = "application/msword" elif file_type.value == "docx": mime_types_mapping[ file_type.value
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/onedrive.html
3277bff3e595-1
mime_types_mapping[ file_type.value ] = "application/vnd.openxmlformats-officedocument.wordprocessingml.document" # noqa: E501 elif file_type.value == "pdf": mime_types_mapping[file_type.value] = "application/pdf" return mime_types_mapping [docs]class OneDriveLoader(BaseLoader, BaseModel): settings: _OneDriveSettings = Field(default_factory=_OneDriveSettings) drive_id: str = Field(...) folder_path: Optional[str] = None object_ids: Optional[List[str]] = None auth_with_token: bool = False def _auth(self) -> Type[Account]: """ Authenticates the OneDrive API client using the specified authentication method and returns the Account object. Returns: Type[Account]: The authenticated Account object. """ try: from O365 import FileSystemTokenBackend except ImportError: raise ImportError( "O365 package not found, please install it with `pip install o365`" ) if self.auth_with_token: token_storage = _OneDriveTokenStorage() token_path = token_storage.token_path token_backend = FileSystemTokenBackend( token_path=token_path.parent, token_filename=token_path.name ) account = Account( credentials=( self.settings.client_id, self.settings.client_secret.get_secret_value(), ), scopes=SCOPES, token_backend=token_backend, **{"raise_http_errors": False}, ) else: token_backend = FileSystemTokenBackend( token_path=Path.home() / ".credentials" ) account = Account( credentials=( self.settings.client_id,
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/onedrive.html
3277bff3e595-2
) account = Account( credentials=( self.settings.client_id, self.settings.client_secret.get_secret_value(), ), scopes=SCOPES, token_backend=token_backend, **{"raise_http_errors": False}, ) # make the auth account.authenticate() return account def _get_folder_from_path(self, drive: Type[Drive]) -> Union[Folder, Drive]: """ Returns the folder or drive object located at the specified path relative to the given drive. Args: drive (Type[Drive]): The root drive from which the folder path is relative. Returns: Union[Folder, Drive]: The folder or drive object located at the specified path. Raises: FileNotFoundError: If the path does not exist. """ subfolder_drive = drive if self.folder_path is None: return subfolder_drive subfolders = [f for f in self.folder_path.split("/") if f != ""] if len(subfolders) == 0: return subfolder_drive items = subfolder_drive.get_items() for subfolder in subfolders: try: subfolder_drive = list(filter(lambda x: subfolder in x.name, items))[0] items = subfolder_drive.get_items() except (IndexError, AttributeError): raise FileNotFoundError("Path {} not exist.".format(self.folder_path)) return subfolder_drive def _load_from_folder(self, folder: Type[Folder]) -> List[Document]: """ Loads all supported document files from the specified folder and returns a list of Document objects. Args: folder (Type[Folder]): The folder object to load the documents from.
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/onedrive.html
3277bff3e595-3
folder (Type[Folder]): The folder object to load the documents from. Returns: List[Document]: A list of Document objects representing the loaded documents. """ docs = [] file_types = _SupportedFileTypes(file_types=["doc", "docx", "pdf"]) file_mime_types = file_types.fetch_mime_types() items = folder.get_items() with tempfile.TemporaryDirectory() as temp_dir: file_path = f"{temp_dir}" os.makedirs(os.path.dirname(file_path), exist_ok=True) for file in items: if file.is_file: if file.mime_type in list(file_mime_types.values()): loader = OneDriveFileLoader(file=file) docs.extend(loader.load()) return docs def _load_from_object_ids(self, drive: Type[Drive]) -> List[Document]: """ Loads all supported document files from the specified OneDrive drive based on their object IDs and returns a list of Document objects. Args: drive (Type[Drive]): The OneDrive drive object to load the documents from. Returns: List[Document]: A list of Document objects representing the loaded documents. """ docs = [] file_types = _SupportedFileTypes(file_types=["doc", "docx", "pdf"]) file_mime_types = file_types.fetch_mime_types() with tempfile.TemporaryDirectory() as temp_dir: file_path = f"{temp_dir}" os.makedirs(os.path.dirname(file_path), exist_ok=True) for object_id in self.object_ids if self.object_ids else [""]: file = drive.get_item(object_id) if not file: logging.warning( "There isn't a file with "
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/onedrive.html
3277bff3e595-4
                logging.warning(
                    "There isn't a file with "
                    f"object_id {object_id} in drive {drive}."
                )
                continue
            if file.is_file:
                if file.mime_type in list(file_mime_types.values()):
                    loader = OneDriveFileLoader(file=file)
                    docs.extend(loader.load())
        return docs

[docs]    def load(self) -> List[Document]:
        """
        Loads all supported document files from the specified OneDrive drive
        and returns a list of Document objects.

        Returns:
            List[Document]: A list of Document objects representing the loaded documents.

        Raises:
            ValueError: If the specified drive ID does not correspond to
            a drive in the OneDrive storage.
        """
        account = self._auth()
        storage = account.storage()
        drive = storage.get_drive(self.drive_id)
        docs: List[Document] = []
        if not drive:
            raise ValueError(f"There isn't a drive with id {self.drive_id}.")
        if self.folder_path:
            folder = self._get_folder_from_path(drive=drive)
            docs.extend(self._load_from_folder(folder=folder))
        elif self.object_ids:
            docs.extend(self._load_from_object_ids(drive=drive))
        return docs
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/onedrive.html
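A minimal usage sketch for the loader above. The client credentials are read from the O365_CLIENT_ID and O365_CLIENT_SECRET environment variables handled by _OneDriveSettings; the drive id and folder path are placeholders, and auth_with_token=True assumes a token has already been cached at ~/.credentials/o365_token.txt:

    import os
    from langchain.document_loaders.onedrive import OneDriveLoader

    os.environ["O365_CLIENT_ID"] = "YOUR_CLIENT_ID"          # placeholder app registration
    os.environ["O365_CLIENT_SECRET"] = "YOUR_CLIENT_SECRET"  # placeholder secret

    loader = OneDriveLoader(
        drive_id="b!xxxxxxxx",            # placeholder drive id
        folder_path="Documents/reports",  # load every supported file under this folder
        auth_with_token=True,             # reuse the cached O365 token
    )
    docs = loader.load()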
eb3d3157a888-0
Source code for langchain.document_loaders.arxiv

from typing import List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.arxiv import ArxivAPIWrapper


[docs]class ArxivLoader(BaseLoader):
    """Loads a query result from arxiv.org into a list of Documents.

    Each Document corresponds to one paper returned by the query.
    The loader converts the original PDF into plain text.
    """

    def __init__(
        self,
        query: str,
        load_max_docs: Optional[int] = 100,
        load_all_available_meta: Optional[bool] = False,
    ):
        self.query = query
        self.load_max_docs = load_max_docs
        self.load_all_available_meta = load_all_available_meta

[docs]    def load(self) -> List[Document]:
        arxiv_client = ArxivAPIWrapper(
            load_max_docs=self.load_max_docs,
            load_all_available_meta=self.load_all_available_meta,
        )
        docs = arxiv_client.load(self.query)
        return docs
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/arxiv.html
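A minimal usage sketch; the query string is illustrative, and the arxiv PyPI package must be installed for ArxivAPIWrapper to work:

    from langchain.document_loaders import ArxivLoader  # requires `pip install arxiv`

    loader = ArxivLoader(
        query="retrieval augmented generation",  # free-text or arXiv id query
        load_max_docs=2,                          # cap the number of papers fetched
        load_all_available_meta=False,            # keep only the default metadata fields
    )
    docs = loader.load()
    print(docs[0].metadata)  # e.g. title, authors, summary fields from arXiv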
d0a5f55e6fcd-0
Source code for langchain.document_loaders.apify_dataset """Logic for loading documents from Apify datasets.""" from typing import Any, Callable, Dict, List from pydantic import BaseModel, root_validator from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class ApifyDatasetLoader(BaseLoader, BaseModel): """Logic for loading documents from Apify datasets.""" apify_client: Any dataset_id: str """The ID of the dataset on the Apify platform.""" dataset_mapping_function: Callable[[Dict], Document] """A custom function that takes a single dictionary (an Apify dataset item) and converts it to an instance of the Document class.""" def __init__( self, dataset_id: str, dataset_mapping_function: Callable[[Dict], Document] ): """Initialize the loader with an Apify dataset ID and a mapping function. Args: dataset_id (str): The ID of the dataset on the Apify platform. dataset_mapping_function (Callable): A function that takes a single dictionary (an Apify dataset item) and converts it to an instance of the Document class. """ super().__init__( dataset_id=dataset_id, dataset_mapping_function=dataset_mapping_function ) @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate environment.""" try: from apify_client import ApifyClient values["apify_client"] = ApifyClient() except ImportError: raise ImportError( "Could not import apify-client Python package. " "Please install it with `pip install apify-client`." ) return values
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/apify_dataset.html
d0a5f55e6fcd-1
            )
        return values

[docs]    def load(self) -> List[Document]:
        """Load documents."""
        dataset_items = (
            self.apify_client.dataset(self.dataset_id).list_items(clean=True).items
        )
        return list(map(self.dataset_mapping_function, dataset_items))
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/apify_dataset.html
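A minimal usage sketch; the dataset id is a placeholder, and the field names used in the mapping function depend entirely on what the Apify actor wrote into the dataset:

    from langchain.docstore.document import Document
    from langchain.document_loaders import ApifyDatasetLoader

    loader = ApifyDatasetLoader(
        dataset_id="your-dataset-id",  # placeholder Apify dataset id
        dataset_mapping_function=lambda item: Document(
            page_content=item.get("text", ""),        # hypothetical actor output field
            metadata={"source": item.get("url", "")},  # hypothetical actor output field
        ),
    )
    docs = loader.load()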
09c78b37945c-0
Source code for langchain.document_loaders.airtable from typing import Iterator, List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class AirtableLoader(BaseLoader): """Loader for Airtable tables.""" def __init__(self, api_token: str, table_id: str, base_id: str): """Initialize with API token and the IDs for table and base""" self.api_token = api_token self.table_id = table_id self.base_id = base_id [docs] def lazy_load(self) -> Iterator[Document]: """Lazy load records from table.""" from pyairtable import Table table = Table(self.api_token, self.base_id, self.table_id) records = table.all() for record in records: # Need to convert record from dict to str yield Document( page_content=str(record), metadata={ "source": self.base_id + "_" + self.table_id, "base_id": self.base_id, "table_id": self.table_id, }, ) [docs] def load(self) -> List[Document]: """Load Table.""" return list(self.lazy_load())
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/airtable.html
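A minimal usage sketch for the Airtable loader above (the token and ids are placeholders; pyairtable must be installed):

    from langchain.document_loaders.airtable import AirtableLoader  # requires `pip install pyairtable`

    loader = AirtableLoader(
        api_token="patXXXXXXXX",  # placeholder personal access token
        table_id="tblXXXXXXXX",   # placeholder table id
        base_id="appXXXXXXXX",    # placeholder base id
    )
    docs = loader.load()  # each record is stringified into page_content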
27758948a7ed-0
Source code for langchain.document_loaders.github from abc import ABC from datetime import datetime from typing import Dict, Iterator, List, Literal, Optional, Union import requests from pydantic import BaseModel, root_validator, validator from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utils import get_from_dict_or_env class BaseGitHubLoader(BaseLoader, BaseModel, ABC): """Load issues of a GitHub repository.""" repo: str """Name of repository""" access_token: str """Personal access token - see https://github.com/settings/tokens?type=beta""" @root_validator(pre=True) def validate_environment(cls, values: Dict) -> Dict: """Validate that access token exists in environment.""" values["access_token"] = get_from_dict_or_env( values, "access_token", "GITHUB_PERSONAL_ACCESS_TOKEN" ) return values @property def headers(self) -> Dict[str, str]: return { "Accept": "application/vnd.github+json", "Authorization": f"Bearer {self.access_token}", } [docs]class GitHubIssuesLoader(BaseGitHubLoader): include_prs: bool = True """If True include Pull Requests in results, otherwise ignore them.""" milestone: Union[int, Literal["*", "none"], None] = None """If integer is passed, it should be a milestone's number field. If the string '*' is passed, issues with any milestone are accepted. If the string 'none' is passed, issues without milestones are returned. """ state: Optional[Literal["open", "closed", "all"]] = None
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/github.html
27758948a7ed-1
    state: Optional[Literal["open", "closed", "all"]] = None
    """Filter on issue state. Can be one of: 'open', 'closed', 'all'."""
    assignee: Optional[str] = None
    """Filter on assigned user. Pass 'none' for no user and '*' for any user."""
    creator: Optional[str] = None
    """Filter on the user that created the issue."""
    mentioned: Optional[str] = None
    """Filter on a user that's mentioned in the issue."""
    labels: Optional[List[str]] = None
    """Label names to filter on. Example: bug,ui,@high."""
    sort: Optional[Literal["created", "updated", "comments"]] = None
    """What to sort results by. Can be one of: 'created', 'updated', 'comments'.
    Default is 'created'."""
    direction: Optional[Literal["asc", "desc"]] = None
    """The direction to sort the results by. Can be one of: 'asc', 'desc'."""
    since: Optional[str] = None
    """Only show issues updated after the given time.
    This is a timestamp in ISO 8601 format: YYYY-MM-DDTHH:MM:SSZ."""

    @validator("since")
    def validate_since(cls, v: Optional[str]) -> Optional[str]:
        if v:
            try:
                datetime.strptime(v, "%Y-%m-%dT%H:%M:%SZ")
            except ValueError:
                raise ValueError(
                    "Invalid value for 'since'. Expected a date string in "
                    f"YYYY-MM-DDTHH:MM:SSZ format. Received: {v}"
                )
        return v

[docs]    def lazy_load(self) -> Iterator[Document]:
        """
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/github.html
27758948a7ed-2
[docs] def lazy_load(self) -> Iterator[Document]: """ Get issues of a GitHub repository. Returns: A list of Documents with attributes: - page_content - metadata - url - title - creator - created_at - last_update_time - closed_time - number of comments - state - labels - assignee - assignees - milestone - locked - number - is_pull_request """ url: Optional[str] = self.url while url: response = requests.get(url, headers=self.headers) response.raise_for_status() issues = response.json() for issue in issues: doc = self.parse_issue(issue) if not self.include_prs and doc.metadata["is_pull_request"]: continue yield doc if response.links and response.links.get("next"): url = response.links["next"]["url"] else: url = None [docs] def load(self) -> List[Document]: """ Get issues of a GitHub repository. Returns: A list of Documents with attributes: - page_content - metadata - url - title - creator - created_at - last_update_time - closed_time - number of comments - state - labels - assignee - assignees - milestone - locked - number - is_pull_request """ return list(self.lazy_load()) [docs] def parse_issue(self, issue: dict) -> Document: """Create Document objects from a list of GitHub issues.""" metadata = {
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/github.html
27758948a7ed-3
"""Create Document objects from a list of GitHub issues.""" metadata = { "url": issue["html_url"], "title": issue["title"], "creator": issue["user"]["login"], "created_at": issue["created_at"], "comments": issue["comments"], "state": issue["state"], "labels": [label["name"] for label in issue["labels"]], "assignee": issue["assignee"]["login"] if issue["assignee"] else None, "milestone": issue["milestone"]["title"] if issue["milestone"] else None, "locked": issue["locked"], "number": issue["number"], "is_pull_request": "pull_request" in issue, } content = issue["body"] if issue["body"] is not None else "" return Document(page_content=content, metadata=metadata) @property def query_params(self) -> str: labels = ",".join(self.labels) if self.labels else self.labels query_params_dict = { "milestone": self.milestone, "state": self.state, "assignee": self.assignee, "creator": self.creator, "mentioned": self.mentioned, "labels": labels, "sort": self.sort, "direction": self.direction, "since": self.since, } query_params_list = [ f"{k}={v}" for k, v in query_params_dict.items() if v is not None ] query_params = "&".join(query_params_list) return query_params @property def url(self) -> str:
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/github.html
27758948a7ed-4
        return query_params

    @property
    def url(self) -> str:
        return f"https://api.github.com/repos/{self.repo}/issues?{self.query_params}"
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/github.html
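A minimal usage sketch; the repository and token are placeholders, and the token may instead be supplied through the GITHUB_PERSONAL_ACCESS_TOKEN environment variable handled by validate_environment above:

    from langchain.document_loaders import GitHubIssuesLoader

    loader = GitHubIssuesLoader(
        repo="hwchase17/langchain",  # "owner/name" of the repository
        access_token="ghp_xxx",      # placeholder; or set GITHUB_PERSONAL_ACCESS_TOKEN
        include_prs=False,           # skip pull requests
        state="all",                 # open and closed issues
        labels=["bug"],              # only issues carrying this label
    )
    docs = loader.load()  # issue body becomes page_content, the listed fields go to metadata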
225539bd6353-0
Source code for langchain.document_loaders.chatgpt """Load conversations from ChatGPT data export""" import datetime import json from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader def concatenate_rows(message: dict, title: str) -> str: """ Combine message information in a readable format ready to be used. Args: message: Message to be concatenated title: Title of the conversation Returns: Concatenated message """ if not message: return "" sender = message["author"]["role"] if message["author"] else "unknown" text = message["content"]["parts"][0] date = datetime.datetime.fromtimestamp(message["create_time"]).strftime( "%Y-%m-%d %H:%M:%S" ) return f"{title} - {sender} on {date}: {text}\n\n" [docs]class ChatGPTLoader(BaseLoader): """Loader that loads conversations from exported ChatGPT data.""" def __init__(self, log_file: str, num_logs: int = -1): self.log_file = log_file self.num_logs = num_logs [docs] def load(self) -> List[Document]: with open(self.log_file, encoding="utf8") as f: data = json.load(f)[: self.num_logs] if self.num_logs else json.load(f) documents = [] for d in data: title = d["title"] messages = d["mapping"] text = "".join( [ concatenate_rows(messages[key]["message"], title) for idx, key in enumerate(messages) if not ( idx == 0
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/chatgpt.html
225539bd6353-1
                    if not (
                        idx == 0
                        and messages[key]["message"]["author"]["role"] == "system"
                    )
                ]
            )
            metadata = {"source": str(self.log_file)}
            documents.append(Document(page_content=text, metadata=metadata))
        return documents
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/chatgpt.html
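A minimal usage sketch over a ChatGPT data export; the path is a placeholder for the conversations.json file inside the export archive:

    from langchain.document_loaders import ChatGPTLoader

    loader = ChatGPTLoader(
        log_file="./conversations.json",  # placeholder path to the exported file
        num_logs=5,                        # only the first five conversations
    )
    docs = loader.load()  # one Document per conversation, messages concatenated with timestamps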
6dcb746633a4-0
Source code for langchain.document_loaders.xml

"""Loader that loads XML files."""
from typing import Any, List

from langchain.document_loaders.unstructured import (
    UnstructuredFileLoader,
    validate_unstructured_version,
)


[docs]class UnstructuredXMLLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load XML files."""

    def __init__(
        self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
    ):
        validate_unstructured_version(min_unstructured_version="0.6.7")
        super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
        from unstructured.partition.xml import partition_xml

        return partition_xml(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/xml.html
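A minimal usage sketch; the file path is a placeholder, and unstructured>=0.6.7 is required by the version check above:

    from langchain.document_loaders import UnstructuredXMLLoader

    loader = UnstructuredXMLLoader(
        "./example.xml",   # placeholder path to a local XML file
        mode="elements",   # one Document per parsed element; "single" merges them
    )
    docs = loader.load()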
32e831e9ac79-0
Source code for langchain.document_loaders.dataframe """Load from Dataframe object""" from typing import Any, Iterator, List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class DataFrameLoader(BaseLoader): """Load Pandas DataFrames.""" def __init__(self, data_frame: Any, page_content_column: str = "text"): """Initialize with dataframe object.""" import pandas as pd if not isinstance(data_frame, pd.DataFrame): raise ValueError( f"Expected data_frame to be a pd.DataFrame, got {type(data_frame)}" ) self.data_frame = data_frame self.page_content_column = page_content_column [docs] def lazy_load(self) -> Iterator[Document]: """Lazy load records from dataframe.""" for _, row in self.data_frame.iterrows(): text = row[self.page_content_column] metadata = row.to_dict() metadata.pop(self.page_content_column) yield Document(page_content=text, metadata=metadata) [docs] def load(self) -> List[Document]: """Load full dataframe.""" return list(self.lazy_load())
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/dataframe.html
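A minimal usage sketch with a throwaway DataFrame; the column names are illustrative:

    import pandas as pd
    from langchain.document_loaders import DataFrameLoader

    df = pd.DataFrame(
        {"text": ["first row body", "second row body"], "author": ["alice", "bob"]}
    )
    loader = DataFrameLoader(df, page_content_column="text")
    docs = loader.load()  # "text" becomes page_content, "author" ends up in metadata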
9e73addae985-0
Source code for langchain.document_loaders.confluence

"""Load Data from a Confluence Space"""
import logging
from enum import Enum
from io import BytesIO
from typing import Any, Callable, Dict, List, Optional, Union

from tenacity import (
    before_sleep_log,
    retry,
    stop_after_attempt,
    wait_exponential,
)

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)


class ContentFormat(str, Enum):
    """Enumerator of the content formats of Confluence page."""

    STORAGE = "body.storage"
    VIEW = "body.view"

    def get_content(self, page: dict) -> str:
        if self == ContentFormat.STORAGE:
            return page["body"]["storage"]["value"]
        elif self == ContentFormat.VIEW:
            return page["body"]["view"]["value"]

        raise ValueError("unknown content format")


[docs]class ConfluenceLoader(BaseLoader):
    """
    Load Confluence pages. Port of https://llamahub.ai/l/confluence

    This currently supports username/api_key, OAuth2 login, or personal access token
    authentication.

    Specify a list of page_ids and/or a space_key to load the corresponding pages into
    Document objects; if both are specified, the union of both sets will be returned.

    You can also specify a boolean `include_attachments` to include attachments. This
    is set to False by default; if set to True, all attachments will be downloaded and
    ConfluenceReader will extract the text from the attachments and add it to the
    Document object. Currently supported attachment types are: PDF, PNG, JPEG/JPG,
    SVG, Word and Excel.

    The Confluence API supports different formats of page content. The storage format is the
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/confluence.html
9e73addae985-1
    The Confluence API supports different formats of page content. The storage format is the
    raw XML representation for storage. The view format is the HTML representation for
    viewing, with macros rendered as they would appear to users. You can pass an enum
    `content_format` argument to `load()` to specify the content format; this is set
    to `ContentFormat.STORAGE` by default.

    Hint: space_key and page_id can both be found in the URL of a page in Confluence
    - https://yoursite.atlassian.com/wiki/spaces/<space_key>/pages/<page_id>

    Example:
        .. code-block:: python

            from langchain.document_loaders import ConfluenceLoader

            loader = ConfluenceLoader(
                url="https://yoursite.atlassian.com/wiki",
                username="me",
                api_key="12345"
            )
            documents = loader.load(space_key="SPACE", limit=50)

    :param url: _description_
    :type url: str
    :param api_key: _description_, defaults to None
    :type api_key: str, optional
    :param username: _description_, defaults to None
    :type username: str, optional
    :param oauth2: _description_, defaults to {}
    :type oauth2: dict, optional
    :param token: _description_, defaults to None
    :type token: str, optional
    :param cloud: _description_, defaults to True
    :type cloud: bool, optional
    :param number_of_retries: How many times to retry, defaults to 3
    :type number_of_retries: Optional[int], optional
    :param min_retry_seconds: defaults to 2
    :type min_retry_seconds: Optional[int], optional
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/confluence.html
9e73addae985-2
:type min_retry_seconds: Optional[int], optional :param max_retry_seconds: defaults to 10 :type max_retry_seconds: Optional[int], optional :param confluence_kwargs: additional kwargs to initialize confluence with :type confluence_kwargs: dict, optional :raises ValueError: Errors while validating input :raises ImportError: Required dependencies not installed. """ def __init__( self, url: str, api_key: Optional[str] = None, username: Optional[str] = None, oauth2: Optional[dict] = None, token: Optional[str] = None, cloud: Optional[bool] = True, number_of_retries: Optional[int] = 3, min_retry_seconds: Optional[int] = 2, max_retry_seconds: Optional[int] = 10, confluence_kwargs: Optional[dict] = None, ): confluence_kwargs = confluence_kwargs or {} errors = ConfluenceLoader.validate_init_args( url, api_key, username, oauth2, token ) if errors: raise ValueError(f"Error(s) while validating input: {errors}") self.base_url = url self.number_of_retries = number_of_retries self.min_retry_seconds = min_retry_seconds self.max_retry_seconds = max_retry_seconds try: from atlassian import Confluence # noqa: F401 except ImportError: raise ImportError( "`atlassian` package not found, please run " "`pip install atlassian-python-api`" ) if oauth2: self.confluence = Confluence( url=url, oauth2=oauth2, cloud=cloud, **confluence_kwargs
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/confluence.html
9e73addae985-3
                url=url, oauth2=oauth2, cloud=cloud, **confluence_kwargs
            )
        elif token:
            self.confluence = Confluence(
                url=url, token=token, cloud=cloud, **confluence_kwargs
            )
        else:
            self.confluence = Confluence(
                url=url,
                username=username,
                password=api_key,
                cloud=cloud,
                **confluence_kwargs,
            )

[docs]    @staticmethod
    def validate_init_args(
        url: Optional[str] = None,
        api_key: Optional[str] = None,
        username: Optional[str] = None,
        oauth2: Optional[dict] = None,
        token: Optional[str] = None,
    ) -> Union[List, None]:
        """Validates proper combinations of init arguments"""
        errors = []
        if url is None:
            errors.append("Must provide `base_url`")

        if (api_key and not username) or (username and not api_key):
            errors.append(
                "If one of `api_key` or `username` is provided, "
                "the other must be as well."
            )

        if (api_key or username) and oauth2:
            errors.append(
                "Cannot provide a value for `api_key` and/or "
                "`username` and provide a value for `oauth2`"
            )

        if oauth2 and oauth2.keys() != [
            "access_token",
            "access_token_secret",
            "consumer_key",
            "key_cert",
        ]:
            errors.append(
                "You have either omitted required keys or added extra "
                "keys to the oauth2 dictionary. key values should be "
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/confluence.html
9e73addae985-4
"keys to the oauth2 dictionary. key values should be " "`['access_token', 'access_token_secret', 'consumer_key', 'key_cert']`" ) if token and (api_key or username or oauth2): errors.append( "Cannot provide a value for `token` and a value for `api_key`, " "`username` or `oauth2`" ) if errors: return errors return None [docs] def load( self, space_key: Optional[str] = None, page_ids: Optional[List[str]] = None, label: Optional[str] = None, cql: Optional[str] = None, include_restricted_content: bool = False, include_archived_content: bool = False, include_attachments: bool = False, include_comments: bool = False, content_format: ContentFormat = ContentFormat.STORAGE, limit: Optional[int] = 50, max_pages: Optional[int] = 1000, ocr_languages: Optional[str] = None, ) -> List[Document]: """ :param space_key: Space key retrieved from a confluence URL, defaults to None :type space_key: Optional[str], optional :param page_ids: List of specific page IDs to load, defaults to None :type page_ids: Optional[List[str]], optional :param label: Get all pages with this label, defaults to None :type label: Optional[str], optional :param cql: CQL Expression, defaults to None :type cql: Optional[str], optional :param include_restricted_content: defaults to False :type include_restricted_content: bool, optional
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/confluence.html
9e73addae985-5
:type include_restricted_content: bool, optional :param include_archived_content: Whether to include archived content, defaults to False :type include_archived_content: bool, optional :param include_attachments: defaults to False :type include_attachments: bool, optional :param include_comments: defaults to False :type include_comments: bool, optional :param content_format: Specify content format, defaults to ContentFormat.STORAGE :type content_format: ContentFormat :param limit: Maximum number of pages to retrieve per request, defaults to 50 :type limit: int, optional :param max_pages: Maximum number of pages to retrieve in total, defaults 1000 :type max_pages: int, optional :param ocr_languages: The languages to use for the Tesseract agent. To use a language, you'll first need to install the appropriate Tesseract language pack. :type ocr_languages: str, optional :raises ValueError: _description_ :raises ImportError: _description_ :return: _description_ :rtype: List[Document] """ if not space_key and not page_ids and not label and not cql: raise ValueError( "Must specify at least one among `space_key`, `page_ids`, " "`label`, `cql` parameters." ) docs = [] if space_key: pages = self.paginate_request( self.confluence.get_all_pages_from_space, space=space_key, limit=limit, max_pages=max_pages, status="any" if include_archived_content else "current", expand=content_format.value, ) docs += self.process_pages(
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/confluence.html
9e73addae985-6
expand=content_format.value, ) docs += self.process_pages( pages, include_restricted_content, include_attachments, include_comments, content_format, ocr_languages, ) if label: pages = self.paginate_request( self.confluence.get_all_pages_by_label, label=label, limit=limit, max_pages=max_pages, ) ids_by_label = [page["id"] for page in pages] if page_ids: page_ids = list(set(page_ids + ids_by_label)) else: page_ids = list(set(ids_by_label)) if cql: pages = self.paginate_request( self._search_content_by_cql, cql=cql, limit=limit, max_pages=max_pages, include_archived_spaces=include_archived_content, expand=content_format.value, ) docs += self.process_pages( pages, include_restricted_content, include_attachments, include_comments, content_format, ocr_languages, ) if page_ids: for page_id in page_ids: get_page = retry( reraise=True, stop=stop_after_attempt( self.number_of_retries # type: ignore[arg-type] ), wait=wait_exponential( multiplier=1, # type: ignore[arg-type] min=self.min_retry_seconds, # type: ignore[arg-type] max=self.max_retry_seconds, # type: ignore[arg-type] ), before_sleep=before_sleep_log(logger, logging.WARNING), )(self.confluence.get_page_by_id)
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/confluence.html
9e73addae985-7
)(self.confluence.get_page_by_id) page = get_page(page_id=page_id, expand=content_format.value) if not include_restricted_content and not self.is_public_page(page): continue doc = self.process_page( page, include_attachments, include_comments, content_format, ocr_languages, ) docs.append(doc) return docs def _search_content_by_cql( self, cql: str, include_archived_spaces: Optional[bool] = None, **kwargs: Any ) -> List[dict]: url = "rest/api/content/search" params: Dict[str, Any] = {"cql": cql} params.update(kwargs) if include_archived_spaces is not None: params["includeArchivedSpaces"] = include_archived_spaces response = self.confluence.get(url, params=params) return response.get("results", []) [docs] def paginate_request(self, retrieval_method: Callable, **kwargs: Any) -> List: """Paginate the various methods to retrieve groups of pages. Unfortunately, due to page size, sometimes the Confluence API doesn't match the limit value. If `limit` is >100 confluence seems to cap the response to 100. Also, due to the Atlassian Python package, we don't get the "next" values from the "_links" key because they only return the value from the results key. So here, the pagination starts from 0 and goes until the max_pages, getting the `limit` number of pages with each request. We have to manually check if there are more docs based on the length of the returned list of pages, rather than
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/confluence.html
9e73addae985-8
are more docs based on the length of the returned list of pages, rather than just checking for the presence of a `next` key in the response like this page would have you do: https://developer.atlassian.com/server/confluence/pagination-in-the-rest-api/ :param retrieval_method: Function used to retrieve docs :type retrieval_method: callable :return: List of documents :rtype: List """ max_pages = kwargs.pop("max_pages") docs: List[dict] = [] while len(docs) < max_pages: get_pages = retry( reraise=True, stop=stop_after_attempt( self.number_of_retries # type: ignore[arg-type] ), wait=wait_exponential( multiplier=1, min=self.min_retry_seconds, # type: ignore[arg-type] max=self.max_retry_seconds, # type: ignore[arg-type] ), before_sleep=before_sleep_log(logger, logging.WARNING), )(retrieval_method) batch = get_pages(**kwargs, start=len(docs)) if not batch: break docs.extend(batch) return docs[:max_pages] [docs] def is_public_page(self, page: dict) -> bool: """Check if a page is publicly accessible.""" restrictions = self.confluence.get_all_restrictions_for_content(page["id"]) return ( page["status"] == "current" and not restrictions["read"]["restrictions"]["user"]["results"] and not restrictions["read"]["restrictions"]["group"]["results"] ) [docs] def process_pages( self, pages: List[dict], include_restricted_content: bool,
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/confluence.html
9e73addae985-9
pages: List[dict], include_restricted_content: bool, include_attachments: bool, include_comments: bool, content_format: ContentFormat, ocr_languages: Optional[str] = None, ) -> List[Document]: """Process a list of pages into a list of documents.""" docs = [] for page in pages: if not include_restricted_content and not self.is_public_page(page): continue doc = self.process_page( page, include_attachments, include_comments, content_format, ocr_languages, ) docs.append(doc) return docs [docs] def process_page( self, page: dict, include_attachments: bool, include_comments: bool, content_format: ContentFormat, ocr_languages: Optional[str] = None, ) -> Document: try: from bs4 import BeautifulSoup # type: ignore except ImportError: raise ImportError( "`beautifulsoup4` package not found, please run " "`pip install beautifulsoup4`" ) if include_attachments: attachment_texts = self.process_attachment(page["id"], ocr_languages) else: attachment_texts = [] content = content_format.get_content(page) text = BeautifulSoup(content, "lxml").get_text(" ", strip=True) + "".join( attachment_texts ) if include_comments: comments = self.confluence.get_page_comments( page["id"], expand="body.view.value", depth="all" )["results"] comment_texts = [ BeautifulSoup(comment["body"]["view"]["value"], "lxml").get_text(
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/confluence.html
9e73addae985-10
BeautifulSoup(comment["body"]["view"]["value"], "lxml").get_text( " ", strip=True ) for comment in comments ] text = text + "".join(comment_texts) return Document( page_content=text, metadata={ "title": page["title"], "id": page["id"], "source": self.base_url.strip("/") + page["_links"]["webui"], }, ) [docs] def process_attachment( self, page_id: str, ocr_languages: Optional[str] = None, ) -> List[str]: try: from PIL import Image # noqa: F401 except ImportError: raise ImportError( "`Pillow` package not found, " "please run `pip install Pillow`" ) # depending on setup you may also need to set the correct path for # poppler and tesseract attachments = self.confluence.get_attachments_from_content(page_id)["results"] texts = [] for attachment in attachments: media_type = attachment["metadata"]["mediaType"] absolute_url = self.base_url + attachment["_links"]["download"] title = attachment["title"] if media_type == "application/pdf": text = title + self.process_pdf(absolute_url, ocr_languages) elif ( media_type == "image/png" or media_type == "image/jpg" or media_type == "image/jpeg" ): text = title + self.process_image(absolute_url, ocr_languages) elif ( media_type == "application/vnd.openxmlformats-officedocument" ".wordprocessingml.document" ):
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/confluence.html
9e73addae985-11
".wordprocessingml.document" ): text = title + self.process_doc(absolute_url) elif media_type == "application/vnd.ms-excel": text = title + self.process_xls(absolute_url) elif media_type == "image/svg+xml": text = title + self.process_svg(absolute_url, ocr_languages) else: continue texts.append(text) return texts [docs] def process_pdf( self, link: str, ocr_languages: Optional[str] = None, ) -> str: try: import pytesseract # noqa: F401 from pdf2image import convert_from_bytes # noqa: F401 except ImportError: raise ImportError( "`pytesseract` or `pdf2image` package not found, " "please run `pip install pytesseract pdf2image`" ) response = self.confluence.request(path=link, absolute=True) text = "" if ( response.status_code != 200 or response.content == b"" or response.content is None ): return text try: images = convert_from_bytes(response.content) except ValueError: return text for i, image in enumerate(images): image_text = pytesseract.image_to_string(image, lang=ocr_languages) text += f"Page {i + 1}:\n{image_text}\n\n" return text [docs] def process_image( self, link: str, ocr_languages: Optional[str] = None, ) -> str: try: import pytesseract # noqa: F401
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/confluence.html
9e73addae985-12
try: import pytesseract # noqa: F401 from PIL import Image # noqa: F401 except ImportError: raise ImportError( "`pytesseract` or `Pillow` package not found, " "please run `pip install pytesseract Pillow`" ) response = self.confluence.request(path=link, absolute=True) text = "" if ( response.status_code != 200 or response.content == b"" or response.content is None ): return text try: image = Image.open(BytesIO(response.content)) except OSError: return text return pytesseract.image_to_string(image, lang=ocr_languages) [docs] def process_doc(self, link: str) -> str: try: import docx2txt # noqa: F401 except ImportError: raise ImportError( "`docx2txt` package not found, please run `pip install docx2txt`" ) response = self.confluence.request(path=link, absolute=True) text = "" if ( response.status_code != 200 or response.content == b"" or response.content is None ): return text file_data = BytesIO(response.content) return docx2txt.process(file_data) [docs] def process_xls(self, link: str) -> str: try: import xlrd # noqa: F401 except ImportError: raise ImportError("`xlrd` package not found, please run `pip install xlrd`") response = self.confluence.request(path=link, absolute=True) text = "" if ( response.status_code != 200
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/confluence.html
9e73addae985-13
text = "" if ( response.status_code != 200 or response.content == b"" or response.content is None ): return text workbook = xlrd.open_workbook(file_contents=response.content) for sheet in workbook.sheets(): text += f"{sheet.name}:\n" for row in range(sheet.nrows): for col in range(sheet.ncols): text += f"{sheet.cell_value(row, col)}\t" text += "\n" text += "\n" return text [docs] def process_svg( self, link: str, ocr_languages: Optional[str] = None, ) -> str: try: import pytesseract # noqa: F401 from PIL import Image # noqa: F401 from reportlab.graphics import renderPM # noqa: F401 from svglib.svglib import svg2rlg # noqa: F401 except ImportError: raise ImportError( "`pytesseract`, `Pillow`, `reportlab` or `svglib` package not found, " "please run `pip install pytesseract Pillow reportlab svglib`" ) response = self.confluence.request(path=link, absolute=True) text = "" if ( response.status_code != 200 or response.content == b"" or response.content is None ): return text drawing = svg2rlg(BytesIO(response.content)) img_data = BytesIO() renderPM.drawToFile(drawing, img_data, fmt="PNG") img_data.seek(0) image = Image.open(img_data)
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/confluence.html
9e73addae985-14
img_data.seek(0) image = Image.open(img_data) return pytesseract.image_to_string(image, lang=ocr_languages)
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/confluence.html
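Each attachment handler above follows the same pattern: fetch the raw bytes through the Confluence client, then hand them to a format-specific library. A minimal standalone sketch of the pdf2image + pytesseract pipeline that process_pdf relies on; the file path and language code are placeholders, and poppler plus tesseract are assumed to be installed on the system.

# Sketch of the OCR approach used in process_pdf, applied to a local file.
import pytesseract
from pdf2image import convert_from_path

def ocr_pdf(path: str, ocr_languages: str = "eng") -> str:
    text = ""
    for i, image in enumerate(convert_from_path(path)):  # one PIL image per page
        page_text = pytesseract.image_to_string(image, lang=ocr_languages)
        text += f"Page {i + 1}:\n{page_text}\n\n"
    return text

print(ocr_pdf("example.pdf"))  # "example.pdf" is a placeholder path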
ff8f7ef71aea-0
Source code for langchain.document_loaders.markdown """Loader that loads Markdown files.""" from typing import List from langchain.document_loaders.unstructured import UnstructuredFileLoader [docs]class UnstructuredMarkdownLoader(UnstructuredFileLoader): """Loader that uses unstructured to load markdown files.""" def _get_elements(self) -> List: from unstructured.__version__ import __version__ as __unstructured_version__ from unstructured.partition.md import partition_md # NOTE(MthwRobinson) - enables the loader to work when you're using pre-release # versions of unstructured like 0.4.17-dev1 _unstructured_version = __unstructured_version__.split("-")[0] unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")]) if unstructured_version < (0, 4, 16): raise ValueError( f"You are on unstructured version {__unstructured_version__}. " "Partitioning markdown files is only supported in unstructured>=0.4.16." ) return partition_md(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/markdown.html
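A minimal usage sketch for UnstructuredMarkdownLoader; the file path is a placeholder and unstructured>=0.4.16 is assumed to be installed.

from langchain.document_loaders import UnstructuredMarkdownLoader

loader = UnstructuredMarkdownLoader("./notes/example.md")  # placeholder path
docs = loader.load()  # a single Document in the default "single" mode
print(docs[0].page_content[:200])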
ec9535e74a8a-0
Source code for langchain.document_loaders.url """Loader that uses unstructured to load HTML files.""" import logging from typing import Any, List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader logger = logging.getLogger(__name__) [docs]class UnstructuredURLLoader(BaseLoader): """Loader that uses unstructured to load HTML files.""" def __init__( self, urls: List[str], continue_on_failure: bool = True, mode: str = "single", show_progress_bar: bool = False, **unstructured_kwargs: Any, ): """Initialize with file path.""" try: import unstructured # noqa:F401 from unstructured.__version__ import __version__ as __unstructured_version__ self.__version = __unstructured_version__ except ImportError: raise ValueError( "unstructured package not found, please install it with " "`pip install unstructured`" ) self._validate_mode(mode) self.mode = mode headers = unstructured_kwargs.pop("headers", {}) if len(headers.keys()) != 0: warn_about_headers = False if self.__is_non_html_available(): warn_about_headers = not self.__is_headers_available_for_non_html() else: warn_about_headers = not self.__is_headers_available_for_html() if warn_about_headers: logger.warning( "You are using an old version of unstructured. " "The headers parameter is ignored" ) self.urls = urls self.continue_on_failure = continue_on_failure self.headers = headers self.unstructured_kwargs = unstructured_kwargs self.show_progress_bar = show_progress_bar
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/url.html
ec9535e74a8a-1
self.unstructured_kwargs = unstructured_kwargs self.show_progress_bar = show_progress_bar def _validate_mode(self, mode: str) -> None: _valid_modes = {"single", "elements"} if mode not in _valid_modes: raise ValueError( f"Got {mode} for `mode`, but should be one of `{_valid_modes}`" ) def __is_headers_available_for_html(self) -> bool: _unstructured_version = self.__version.split("-")[0] unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")]) return unstructured_version >= (0, 5, 7) def __is_headers_available_for_non_html(self) -> bool: _unstructured_version = self.__version.split("-")[0] unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")]) return unstructured_version >= (0, 5, 13) def __is_non_html_available(self) -> bool: _unstructured_version = self.__version.split("-")[0] unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")]) return unstructured_version >= (0, 5, 12) [docs] def load(self) -> List[Document]: """Load file.""" from unstructured.partition.auto import partition from unstructured.partition.html import partition_html docs: List[Document] = list() if self.show_progress_bar: try: from tqdm import tqdm except ImportError as e: raise ImportError( "Package tqdm must be installed if show_progress_bar=True. " "Please install with 'pip install tqdm' or set "
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/url.html
ec9535e74a8a-2
"Please install with 'pip install tqdm' or set " "show_progress_bar=False." ) from e urls = tqdm(self.urls) else: urls = self.urls for url in urls: try: if self.__is_non_html_available(): if self.__is_headers_available_for_non_html(): elements = partition( url=url, headers=self.headers, **self.unstructured_kwargs ) else: elements = partition(url=url, **self.unstructured_kwargs) else: if self.__is_headers_available_for_html(): elements = partition_html( url=url, headers=self.headers, **self.unstructured_kwargs ) else: elements = partition_html(url=url, **self.unstructured_kwargs) except Exception as e: if self.continue_on_failure: logger.error(f"Error fetching or processing {url}, exeption: {e}") continue else: raise e if self.mode == "single": text = "\n\n".join([str(el) for el in elements]) metadata = {"source": url} docs.append(Document(page_content=text, metadata=metadata)) elif self.mode == "elements": for element in elements: metadata = element.metadata.to_dict() metadata["category"] = element.category docs.append(Document(page_content=str(element), metadata=metadata)) return docs
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/url.html
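A usage sketch for UnstructuredURLLoader built from the constructor arguments defined above; the URLs are placeholders.

from langchain.document_loaders import UnstructuredURLLoader

urls = ["https://example.com/a", "https://example.com/b"]  # placeholder URLs
loader = UnstructuredURLLoader(urls=urls, mode="elements", continue_on_failure=True)
docs = loader.load()
for doc in docs[:3]:
    print(doc.metadata.get("category"), doc.page_content[:80])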
8269b7e43f80-0
Source code for langchain.document_loaders.bilibili import json import re import warnings from typing import List, Tuple import requests from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class BiliBiliLoader(BaseLoader): """Loader that loads bilibili transcripts.""" def __init__(self, video_urls: List[str]): """Initialize with bilibili url.""" self.video_urls = video_urls [docs] def load(self) -> List[Document]: """Load from bilibili url.""" results = [] for url in self.video_urls: transcript, video_info = self._get_bilibili_subs_and_info(url) doc = Document(page_content=transcript, metadata=video_info) results.append(doc) return results def _get_bilibili_subs_and_info(self, url: str) -> Tuple[str, dict]: try: from bilibili_api import sync, video except ImportError: raise ValueError( "requests package not found, please install it with " "`pip install bilibili-api-python`" ) bvid = re.search(r"BV\w+", url) if bvid is not None: v = video.Video(bvid=bvid.group()) else: aid = re.search(r"av[0-9]+", url) if aid is not None: try: v = video.Video(aid=int(aid.group()[2:])) except AttributeError: raise ValueError(f"{url} is not bilibili url.") else: raise ValueError(f"{url} is not bilibili url.") video_info = sync(v.get_info())
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/bilibili.html
8269b7e43f80-1
video_info = sync(v.get_info()) video_info.update({"url": url}) # Get subtitle url subtitle = video_info.pop("subtitle") sub_list = subtitle["list"] if sub_list: sub_url = sub_list[0]["subtitle_url"] result = requests.get(sub_url) raw_sub_titles = json.loads(result.content)["body"] raw_transcript = " ".join([c["content"] for c in raw_sub_titles]) raw_transcript_with_meta_info = ( f"Video Title: {video_info['title']}," f"description: {video_info['desc']}\n\n" f"Transcript: {raw_transcript}" ) return raw_transcript_with_meta_info, video_info else: raw_transcript = "" warnings.warn( f""" No subtitles found for video: {url}. Return Empty transcript. """ ) return raw_transcript, video_info
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/bilibili.html
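A usage sketch for BiliBiliLoader; the video URL is a placeholder, and only videos that expose subtitles produce a non-empty transcript.

from langchain.document_loaders import BiliBiliLoader

loader = BiliBiliLoader(["https://www.bilibili.com/video/BV1xt411o7Xu"])  # placeholder URL
docs = loader.load()
print(docs[0].metadata["title"])
print(docs[0].page_content[:200])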
cc2c32a21932-0
Source code for langchain.document_loaders.hugging_face_dataset """Loader that loads HuggingFace datasets.""" from typing import Iterator, List, Mapping, Optional, Sequence, Union from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class HuggingFaceDatasetLoader(BaseLoader): """Loading logic for loading documents from the Hugging Face Hub.""" def __init__( self, path: str, page_content_column: str = "text", name: Optional[str] = None, data_dir: Optional[str] = None, data_files: Optional[ Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]] ] = None, cache_dir: Optional[str] = None, keep_in_memory: Optional[bool] = None, save_infos: bool = False, use_auth_token: Optional[Union[bool, str]] = None, num_proc: Optional[int] = None, ): """Initialize the HuggingFaceDatasetLoader. Args: path: Path or name of the dataset. page_content_column: Page content column name. name: Name of the dataset configuration. data_dir: Data directory of the dataset configuration. data_files: Path(s) to source data file(s). cache_dir: Directory to read/write data. keep_in_memory: Whether to copy the dataset in-memory. save_infos: Save the dataset information (checksums/size/splits/...). use_auth_token: Bearer token for remote files on the Datasets Hub. num_proc: Number of processes. """ self.path = path self.page_content_column = page_content_column self.name = name
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/hugging_face_dataset.html
cc2c32a21932-1
self.page_content_column = page_content_column self.name = name self.data_dir = data_dir self.data_files = data_files self.cache_dir = cache_dir self.keep_in_memory = keep_in_memory self.save_infos = save_infos self.use_auth_token = use_auth_token self.num_proc = num_proc [docs] def lazy_load( self, ) -> Iterator[Document]: """Load documents lazily.""" try: from datasets import load_dataset except ImportError: raise ImportError( "Could not import datasets python package. " "Please install it with `pip install datasets`." ) dataset = load_dataset( path=self.path, name=self.name, data_dir=self.data_dir, data_files=self.data_files, cache_dir=self.cache_dir, keep_in_memory=self.keep_in_memory, save_infos=self.save_infos, use_auth_token=self.use_auth_token, num_proc=self.num_proc, ) yield from ( Document( page_content=row.pop(self.page_content_column), metadata=row, ) for key in dataset.keys() for row in dataset[key] ) [docs] def load(self) -> List[Document]: """Load documents.""" return list(self.lazy_load())
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/hugging_face_dataset.html
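A usage sketch for HuggingFaceDatasetLoader; the dataset name and text column are assumptions, shown here with the public imdb dataset.

from langchain.document_loaders import HuggingFaceDatasetLoader

# "imdb" and its "text" column are an assumed example dataset/column.
loader = HuggingFaceDatasetLoader("imdb", page_content_column="text")
docs = loader.load()          # or loader.lazy_load() to stream rows
print(len(docs), docs[0].metadata)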
540b9497356d-0
Source code for langchain.document_loaders.roam """Loader that loads Roam directory dump.""" from pathlib import Path from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class RoamLoader(BaseLoader): """Loader that loads Roam files from disk.""" def __init__(self, path: str): """Initialize with path.""" self.file_path = path [docs] def load(self) -> List[Document]: """Load documents.""" ps = list(Path(self.file_path).glob("**/*.md")) docs = [] for p in ps: with open(p) as f: text = f.read() metadata = {"source": str(p)} docs.append(Document(page_content=text, metadata=metadata)) return docs
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/roam.html
d86681e165ba-0
Source code for langchain.document_loaders.spreedly """Loader that fetches data from Spreedly API.""" import json import urllib.request from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utils import stringify_dict SPREEDLY_ENDPOINTS = { "gateways_options": "https://core.spreedly.com/v1/gateways_options.json", "gateways": "https://core.spreedly.com/v1/gateways.json", "receivers_options": "https://core.spreedly.com/v1/receivers_options.json", "receivers": "https://core.spreedly.com/v1/receivers.json", "payment_methods": "https://core.spreedly.com/v1/payment_methods.json", "certificates": "https://core.spreedly.com/v1/certificates.json", "transactions": "https://core.spreedly.com/v1/transactions.json", "environments": "https://core.spreedly.com/v1/environments.json", } [docs]class SpreedlyLoader(BaseLoader): """Loader that fetches data from Spreedly API.""" def __init__(self, access_token: str, resource: str) -> None: self.access_token = access_token self.resource = resource self.headers = { "Authorization": f"Bearer {self.access_token}", "Accept": "application/json", } def _make_request(self, url: str) -> List[Document]: request = urllib.request.Request(url, headers=self.headers) with urllib.request.urlopen(request) as response: json_data = json.loads(response.read().decode()) text = stringify_dict(json_data)
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/spreedly.html
d86681e165ba-1
text = stringify_dict(json_data) metadata = {"source": url} return [Document(page_content=text, metadata=metadata)] def _get_resource(self) -> List[Document]: endpoint = SPREEDLY_ENDPOINTS.get(self.resource) if endpoint is None: return [] return self._make_request(endpoint) [docs] def load(self) -> List[Document]: return self._get_resource()
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/spreedly.html
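A usage sketch for SpreedlyLoader; the access token is a placeholder and resource must be one of the SPREEDLY_ENDPOINTS keys defined above.

from langchain.document_loaders import SpreedlyLoader

loader = SpreedlyLoader(access_token="YOUR_SPREEDLY_TOKEN", resource="gateways_options")
docs = loader.load()  # one Document containing the stringified JSON payload
print(docs[0].metadata["source"])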
ea67133ad100-0
Source code for langchain.document_loaders.web_base """Web base loader class.""" import asyncio import logging import warnings from typing import Any, Dict, Iterator, List, Optional, Union import aiohttp import requests from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader logger = logging.getLogger(__name__) default_header_template = { "User-Agent": "", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*" ";q=0.8", "Accept-Language": "en-US,en;q=0.5", "Referer": "https://www.google.com/", "DNT": "1", "Connection": "keep-alive", "Upgrade-Insecure-Requests": "1", } def _build_metadata(soup: Any, url: str) -> dict: """Build metadata from BeautifulSoup output.""" metadata = {"source": url} if title := soup.find("title"): metadata["title"] = title.get_text() if description := soup.find("meta", attrs={"name": "description"}): metadata["description"] = description.get("content", None) if html := soup.find("html"): metadata["language"] = html.get("lang", None) return metadata [docs]class WebBaseLoader(BaseLoader): """Loader that uses urllib and beautiful soup to load webpages.""" web_paths: List[str] requests_per_second: int = 2 """Max number of concurrent requests to make.""" default_parser: str = "html.parser" """Default parser to use for BeautifulSoup.""" requests_kwargs: Dict[str, Any] = {} """kwargs for requests"""
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/web_base.html
ea67133ad100-1
requests_kwargs: Dict[str, Any] = {} """kwargs for requests""" raise_for_status: bool = False """Raise an exception if http status code denotes an error.""" bs_get_text_kwargs: Dict[str, Any] = {} """kwargs for beautifulsoup4 get_text""" def __init__( self, web_path: Union[str, List[str]], header_template: Optional[dict] = None, verify: Optional[bool] = True, proxies: Optional[dict] = None, ): """Initialize with webpage path.""" # TODO: Deprecate web_path in favor of web_paths, and remove this # left like this because there are a number of loaders that expect single # urls if isinstance(web_path, str): self.web_paths = [web_path] elif isinstance(web_path, List): self.web_paths = web_path self.session = requests.Session() try: import bs4 # noqa:F401 except ImportError: raise ValueError( "bs4 package not found, please install it with " "`pip install bs4`" ) # Choose to verify self.verify = verify headers = header_template or default_header_template if not headers.get("User-Agent"): try: from fake_useragent import UserAgent headers["User-Agent"] = UserAgent().random except ImportError: logger.info( "fake_useragent not found, using default user agent. " "To get a realistic header for requests, " "`pip install fake_useragent`." ) self.session.headers = dict(headers) if proxies: self.session.proxies.update(proxies) @property
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/web_base.html
ea67133ad100-2
if proxies: self.session.proxies.update(proxies) @property def web_path(self) -> str: if len(self.web_paths) > 1: raise ValueError("Multiple webpaths found.") return self.web_paths[0] async def _fetch( self, url: str, retries: int = 3, cooldown: int = 2, backoff: float = 1.5 ) -> str: # For SiteMap SSL verification if not self.requests_kwargs.get("verify", True): connector = aiohttp.TCPConnector(ssl=False) else: connector = None async with aiohttp.ClientSession(connector=connector) as session: for i in range(retries): try: async with session.get( url, headers=self.session.headers, verify=self.verify ) as response: return await response.text() except aiohttp.ClientConnectionError as e: if i == retries - 1: raise else: logger.warning( f"Error fetching {url} with attempt " f"{i + 1}/{retries}: {e}. Retrying..." ) await asyncio.sleep(cooldown * backoff**i) raise ValueError("retry count exceeded") async def _fetch_with_rate_limit( self, url: str, semaphore: asyncio.Semaphore ) -> str: async with semaphore: return await self._fetch(url) [docs] async def fetch_all(self, urls: List[str]) -> Any: """Fetch all urls concurrently with rate limiting.""" semaphore = asyncio.Semaphore(self.requests_per_second) tasks = [] for url in urls:
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/web_base.html
ea67133ad100-3
tasks = [] for url in urls: task = asyncio.ensure_future(self._fetch_with_rate_limit(url, semaphore)) tasks.append(task) try: from tqdm.asyncio import tqdm_asyncio return await tqdm_asyncio.gather( *tasks, desc="Fetching pages", ascii=True, mininterval=1 ) except ImportError: warnings.warn("For better logging of progress, `pip install tqdm`") return await asyncio.gather(*tasks) @staticmethod def _check_parser(parser: str) -> None: """Check that parser is valid for bs4.""" valid_parsers = ["html.parser", "lxml", "xml", "lxml-xml", "html5lib"] if parser not in valid_parsers: raise ValueError( "`parser` must be one of " + ", ".join(valid_parsers) + "." ) [docs] def scrape_all(self, urls: List[str], parser: Union[str, None] = None) -> List[Any]: """Fetch all urls, then return soups for all results.""" from bs4 import BeautifulSoup results = asyncio.run(self.fetch_all(urls)) final_results = [] for i, result in enumerate(results): url = urls[i] if parser is None: if url.endswith(".xml"): parser = "xml" else: parser = self.default_parser self._check_parser(parser) final_results.append(BeautifulSoup(result, parser)) return final_results def _scrape(self, url: str, parser: Union[str, None] = None) -> Any: from bs4 import BeautifulSoup if parser is None: if url.endswith(".xml"):
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/web_base.html
ea67133ad100-4
if parser is None: if url.endswith(".xml"): parser = "xml" else: parser = self.default_parser self._check_parser(parser) html_doc = self.session.get(url, verify=self.verify, **self.requests_kwargs) if self.raise_for_status: html_doc.raise_for_status() html_doc.encoding = html_doc.apparent_encoding return BeautifulSoup(html_doc.text, parser) [docs] def scrape(self, parser: Union[str, None] = None) -> Any: """Scrape data from webpage and return it in BeautifulSoup format.""" if parser is None: parser = self.default_parser return self._scrape(self.web_path, parser) [docs] def lazy_load(self) -> Iterator[Document]: """Lazy load text from the url(s) in web_path.""" for path in self.web_paths: soup = self._scrape(path) text = soup.get_text(**self.bs_get_text_kwargs) metadata = _build_metadata(soup, path) yield Document(page_content=text, metadata=metadata) [docs] def load(self) -> List[Document]: """Load text from the url(s) in web_path.""" return list(self.lazy_load()) [docs] def aload(self) -> List[Document]: """Load text from the urls in web_path async into Documents.""" results = self.scrape_all(self.web_paths) docs = [] for i in range(len(results)): soup = results[i] text = soup.get_text(**self.bs_get_text_kwargs) metadata = _build_metadata(soup, self.web_paths[i]) docs.append(Document(page_content=text, metadata=metadata)) return docs
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/web_base.html
d58f3de37bd7-0
Source code for langchain.document_loaders.conllu """Load CoNLL-U files.""" import csv from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class CoNLLULoader(BaseLoader): """Load CoNLL-U files.""" def __init__(self, file_path: str): """Initialize with file path.""" self.file_path = file_path [docs] def load(self) -> List[Document]: """Load from file path.""" with open(self.file_path, encoding="utf8") as f: tsv = list(csv.reader(f, delimiter="\t")) # If len(line) > 1, the line is not a comment lines = [line for line in tsv if len(line) > 1] text = "" for i, line in enumerate(lines): # Do not add a space after a punctuation mark or at the end of the sentence if line[9] == "SpaceAfter=No" or i == len(lines) - 1: text += line[1] else: text += line[1] + " " metadata = {"source": self.file_path} return [Document(page_content=text, metadata=metadata)]
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/conllu.html
f32028cd0d58-0
Source code for langchain.document_loaders.acreom """Loader that loads acreom vault from a directory.""" import re from pathlib import Path from typing import Iterator, List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class AcreomLoader(BaseLoader): FRONT_MATTER_REGEX = re.compile(r"^---\n(.*?)\n---\n", re.MULTILINE | re.DOTALL) def __init__( self, path: str, encoding: str = "UTF-8", collect_metadata: bool = True ): """Initialize with path.""" self.file_path = path self.encoding = encoding self.collect_metadata = collect_metadata def _parse_front_matter(self, content: str) -> dict: """Parse front matter metadata from the content and return it as a dict.""" if not self.collect_metadata: return {} match = self.FRONT_MATTER_REGEX.search(content) front_matter = {} if match: lines = match.group(1).split("\n") for line in lines: if ":" in line: key, value = line.split(":", 1) front_matter[key.strip()] = value.strip() else: # Skip lines without a colon continue return front_matter def _remove_front_matter(self, content: str) -> str: """Remove front matter metadata from the given content.""" if not self.collect_metadata: return content return self.FRONT_MATTER_REGEX.sub("", content) def _process_acreom_content(self, content: str) -> str: # remove acreom specific elements from content that # do not contribute to the context of current document
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/acreom.html
f32028cd0d58-1
# do not contribute to the context of current document content = re.sub(r"\s*-\s\[\s\]\s.*|\s*\[\s\]\s.*", "", content) # rm tasks content = re.sub("#", "", content) # rm hashtags content = re.sub(r"\[\[.*?\]\]", "", content) # rm doclinks return content [docs] def lazy_load(self) -> Iterator[Document]: ps = list(Path(self.file_path).glob("**/*.md")) for p in ps: with open(p, encoding=self.encoding) as f: text = f.read() front_matter = self._parse_front_matter(text) text = self._remove_front_matter(text) text = self._process_acreom_content(text) metadata = { "source": str(p.name), "path": str(p), **front_matter, } yield Document(page_content=text, metadata=metadata) [docs] def load(self) -> List[Document]: return list(self.lazy_load())
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/acreom.html
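A usage sketch for AcreomLoader; the vault path is a placeholder, and any front matter keys are merged into each document's metadata.

from langchain.document_loaders import AcreomLoader

loader = AcreomLoader("/path/to/acreom/vault", collect_metadata=True)  # placeholder path
for doc in loader.lazy_load():
    print(doc.metadata["source"], list(doc.metadata))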
8f604b035db2-0
Source code for langchain.document_loaders.azlyrics """Loader that loads AZLyrics.""" from typing import List from langchain.docstore.document import Document from langchain.document_loaders.web_base import WebBaseLoader [docs]class AZLyricsLoader(WebBaseLoader): """Loader that loads AZLyrics webpages.""" [docs] def load(self) -> List[Document]: """Load webpage.""" soup = self.scrape() title = soup.title.text lyrics = soup.find_all("div", {"class": ""})[2].text text = title + lyrics metadata = {"source": self.web_path} return [Document(page_content=text, metadata=metadata)]
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/azlyrics.html
273bef8e013b-0
Source code for langchain.document_loaders.airbyte_json """Loader that loads local airbyte json files.""" import json from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utils import stringify_dict [docs]class AirbyteJSONLoader(BaseLoader): """Loader that loads local airbyte json files.""" def __init__(self, file_path: str): """Initialize with file path. This should start with '/tmp/airbyte_local/'.""" self.file_path = file_path [docs] def load(self) -> List[Document]: """Load file.""" text = "" for line in open(self.file_path, "r"): data = json.loads(line)["_airbyte_data"] text += stringify_dict(data) metadata = {"source": self.file_path} return [Document(page_content=text, metadata=metadata)]
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/airbyte_json.html
4864eb42e99f-0
Source code for langchain.document_loaders.image """Loader that loads image files.""" from typing import List from langchain.document_loaders.unstructured import UnstructuredFileLoader [docs]class UnstructuredImageLoader(UnstructuredFileLoader): """Loader that uses unstructured to load image files, such as PNGs and JPGs.""" def _get_elements(self) -> List: from unstructured.partition.image import partition_image return partition_image(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/image.html
ebe87a6aa180-0
Source code for langchain.document_loaders.readthedocs """Loader that loads ReadTheDocs documentation directory dump.""" from pathlib import Path from typing import Any, List, Optional, Tuple, Union from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class ReadTheDocsLoader(BaseLoader): """Loader that loads ReadTheDocs documentation directory dump.""" def __init__( self, path: Union[str, Path], encoding: Optional[str] = None, errors: Optional[str] = None, custom_html_tag: Optional[Tuple[str, dict]] = None, **kwargs: Optional[Any] ): """ Initialize ReadTheDocsLoader The loader loops over all files under `path` and extract the actual content of the files by retrieving main html tags. Default main html tags include `<main id="main-content>`, <`div role="main>`, and `<article role="main">`. You can also define your own html tags by passing custom_html_tag, e.g. `("div", "class=main")`. The loader iterates html tags with the order of custom html tags (if exists) and default html tags. If any of the tags is not empty, the loop will break and retrieve the content out of that tag. Args: path: The location of pulled readthedocs folder. encoding: The encoding with which to open the documents. errors: Specifies how encoding and decoding errors are to be handled—this cannot be used in binary mode. custom_html_tag: Optional custom html tag to retrieve the content from files. """ try: from bs4 import BeautifulSoup except ImportError: raise ImportError(
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/readthedocs.html
ebe87a6aa180-1
from bs4 import BeautifulSoup except ImportError: raise ImportError( "Could not import the beautifulsoup4 python package. " "Please install it with `pip install beautifulsoup4`. " ) try: _ = BeautifulSoup( "<html><body>Parser builder library test.</body></html>", **kwargs ) except Exception as e: raise ValueError("Parsing kwargs do not appear valid") from e self.file_path = Path(path) self.encoding = encoding self.errors = errors self.custom_html_tag = custom_html_tag self.bs_kwargs = kwargs [docs] def load(self) -> List[Document]: """Load documents.""" docs = [] for p in self.file_path.rglob("*"): if p.is_dir(): continue with open(p, encoding=self.encoding, errors=self.errors) as f: text = self._clean_data(f.read()) metadata = {"source": str(p)} docs.append(Document(page_content=text, metadata=metadata)) return docs
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/readthedocs.html
ebe87a6aa180-2
if text is not None: break if text is not None: text = text.get_text() else: text = "" # trim empty lines return "\n".join([t for t in text.split("\n") if t])
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/readthedocs.html
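A usage sketch for ReadTheDocsLoader; the dump directory is a placeholder, the extra keyword arguments are forwarded to BeautifulSoup, and the custom tag shown is an assumed page structure.

from langchain.document_loaders import ReadTheDocsLoader

loader = ReadTheDocsLoader(
    "rtdocs/",                                   # placeholder: folder with the pulled html
    features="html.parser",                      # forwarded to BeautifulSoup via **kwargs
    custom_html_tag=("div", {"class": "main"}),  # optional, assumed page structure
)
docs = loader.load()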
2c7d9b0230a9-0
Source code for langchain.document_loaders.weather """Simple reader that reads weather data from OpenWeatherMap API""" from __future__ import annotations from datetime import datetime from typing import Iterator, List, Optional, Sequence from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper [docs]class WeatherDataLoader(BaseLoader): """Weather Reader. Reads the forecast & current weather of any location using OpenWeatherMap's free API. Checkout 'https://openweathermap.org/appid' for more on how to generate a free OpenWeatherMap API. """ def __init__( self, client: OpenWeatherMapAPIWrapper, places: Sequence[str], ) -> None: """Initialize with parameters.""" super().__init__() self.client = client self.places = places [docs] @classmethod def from_params( cls, places: Sequence[str], *, openweathermap_api_key: Optional[str] = None ) -> WeatherDataLoader: client = OpenWeatherMapAPIWrapper(openweathermap_api_key=openweathermap_api_key) return cls(client, places) [docs] def lazy_load( self, ) -> Iterator[Document]: """Lazily load weather data for the given locations.""" for place in self.places: metadata = {"queried_at": datetime.now()} content = self.client.run(place) yield Document(page_content=content, metadata=metadata) [docs] def load( self, ) -> List[Document]: """Load weather data for the given locations.""" return list(self.lazy_load())
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/weather.html
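A usage sketch for WeatherDataLoader via from_params; the places and API key are placeholders, and the pyowm package used by OpenWeatherMapAPIWrapper is assumed to be installed.

from langchain.document_loaders import WeatherDataLoader

loader = WeatherDataLoader.from_params(
    places=["Berlin", "Tokyo"],                        # placeholder locations
    openweathermap_api_key="YOUR_OPENWEATHERMAP_KEY",  # placeholder key
)
docs = loader.load()
print(docs[0].metadata["queried_at"], docs[0].page_content[:120])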
b2d5d70c8574-0
Source code for langchain.document_loaders.html """Loader that uses unstructured to load HTML files.""" from typing import List from langchain.document_loaders.unstructured import UnstructuredFileLoader [docs]class UnstructuredHTMLLoader(UnstructuredFileLoader): """Loader that uses unstructured to load HTML files.""" def _get_elements(self) -> List: from unstructured.partition.html import partition_html return partition_html(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/html.html
066e0ca83e1f-0
Source code for langchain.document_loaders.srt """Loader for .srt (subtitle) files.""" from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class SRTLoader(BaseLoader): """Loader for .srt (subtitle) files.""" def __init__(self, file_path: str): """Initialize with file path.""" try: import pysrt # noqa:F401 except ImportError: raise ImportError( "package `pysrt` not found, please install it with `pip install pysrt`" ) self.file_path = file_path [docs] def load(self) -> List[Document]: """Load using pysrt file.""" import pysrt parsed_info = pysrt.open(self.file_path) text = " ".join([t.text for t in parsed_info]) metadata = {"source": self.file_path} return [Document(page_content=text, metadata=metadata)]
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/srt.html
189b839ab994-0
Source code for langchain.document_loaders.onedrive_file from __future__ import annotations import tempfile from typing import TYPE_CHECKING, List from pydantic import BaseModel, Field from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.unstructured import UnstructuredFileLoader if TYPE_CHECKING: from O365.drive import File CHUNK_SIZE = 1024 * 1024 * 5 [docs]class OneDriveFileLoader(BaseLoader, BaseModel): file: File = Field(...) class Config: arbitrary_types_allowed = True [docs] def load(self) -> List[Document]: """Load Documents""" with tempfile.TemporaryDirectory() as temp_dir: file_path = f"{temp_dir}/{self.file.name}" self.file.download(to_path=temp_dir, chunk_size=CHUNK_SIZE) loader = UnstructuredFileLoader(file_path) return loader.load()
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/onedrive_file.html
83bd6e52afb4-0
Source code for langchain.document_loaders.s3_directory """Loading logic for loading documents from an s3 directory.""" from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.s3_file import S3FileLoader [docs]class S3DirectoryLoader(BaseLoader): """Loading logic for loading documents from s3.""" def __init__(self, bucket: str, prefix: str = ""): """Initialize with bucket and key name.""" self.bucket = bucket self.prefix = prefix [docs] def load(self) -> List[Document]: """Load documents.""" try: import boto3 except ImportError: raise ImportError( "Could not import boto3 python package. " "Please install it with `pip install boto3`." ) s3 = boto3.resource("s3") bucket = s3.Bucket(self.bucket) docs = [] for obj in bucket.objects.filter(Prefix=self.prefix): loader = S3FileLoader(self.bucket, obj.key) docs.extend(loader.load()) return docs
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/s3_directory.html
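A usage sketch for S3DirectoryLoader; the bucket and prefix are placeholders, and boto3 resolves AWS credentials from the environment as usual.

from langchain.document_loaders import S3DirectoryLoader

loader = S3DirectoryLoader("my-bucket", prefix="reports/2023/")  # placeholder bucket/prefix
docs = loader.load()
print(len(docs))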
a37b874c974b-0
Source code for langchain.document_loaders.mhtml """Loader to load MHTML files, enriching metadata with page title.""" import email import logging from typing import Dict, List, Union from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader logger = logging.getLogger(__name__) [docs]class MHTMLLoader(BaseLoader): """Loader that uses beautiful soup to parse HTML files.""" def __init__( self, file_path: str, open_encoding: Union[str, None] = None, bs_kwargs: Union[dict, None] = None, get_text_separator: str = "", ) -> None: """Initialise with path, and optionally, file encoding to use, and any kwargs to pass to the BeautifulSoup object.""" try: import bs4 # noqa:F401 except ImportError: raise ValueError( "beautifulsoup4 package not found, please install it with " "`pip install beautifulsoup4`" ) self.file_path = file_path self.open_encoding = open_encoding if bs_kwargs is None: bs_kwargs = {"features": "lxml"} self.bs_kwargs = bs_kwargs self.get_text_separator = get_text_separator [docs] def load(self) -> List[Document]: from bs4 import BeautifulSoup """Load MHTML document into document objects.""" with open(self.file_path, "r", encoding=self.open_encoding) as f: message = email.message_from_string(f.read()) parts = message.get_payload() if type(parts) is not list: parts = [message] for part in parts: if part.get_content_type() == "text/html":
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/mhtml.html
a37b874c974b-1
for part in parts: if part.get_content_type() == "text/html": html = part.get_payload(decode=True).decode() soup = BeautifulSoup(html, **self.bs_kwargs) text = soup.get_text(self.get_text_separator) if soup.title: title = str(soup.title.string) else: title = "" metadata: Dict[str, Union[str, None]] = { "source": self.file_path, "title": title, } return [Document(page_content=text, metadata=metadata)] return []
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/mhtml.html
b0f343f045af-0
Source code for langchain.document_loaders.gcs_file """Loading logic for loading documents from a GCS file.""" import os import tempfile from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.unstructured import UnstructuredFileLoader [docs]class GCSFileLoader(BaseLoader): """Loading logic for loading documents from GCS.""" def __init__(self, project_name: str, bucket: str, blob: str): """Initialize with bucket and key name.""" self.bucket = bucket self.blob = blob self.project_name = project_name [docs] def load(self) -> List[Document]: """Load documents.""" try: from google.cloud import storage except ImportError: raise ValueError( "Could not import google-cloud-storage python package. " "Please install it with `pip install google-cloud-storage`." ) # Initialise a client storage_client = storage.Client(self.project_name) # Create a bucket object for our bucket bucket = storage_client.get_bucket(self.bucket) # Create a blob object from the filepath blob = bucket.blob(self.blob) with tempfile.TemporaryDirectory() as temp_dir: file_path = f"{temp_dir}/{self.blob}" os.makedirs(os.path.dirname(file_path), exist_ok=True) # Download the file to a destination blob.download_to_filename(file_path) loader = UnstructuredFileLoader(file_path) return loader.load()
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/gcs_file.html
ed7445465f5a-0
Source code for langchain.document_loaders.toml import json from pathlib import Path from typing import Iterator, List, Union from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class TomlLoader(BaseLoader): """ A TOML document loader that inherits from the BaseLoader class. This class can be initialized with either a single source file or a source directory containing TOML files. """ def __init__(self, source: Union[str, Path]): """Initialize the TomlLoader with a source file or directory.""" self.source = Path(source) [docs] def load(self) -> List[Document]: """Load and return all documents.""" return list(self.lazy_load()) [docs] def lazy_load(self) -> Iterator[Document]: """Lazily load the TOML documents from the source file or directory.""" import tomli if self.source.is_file() and self.source.suffix == ".toml": files = [self.source] elif self.source.is_dir(): files = list(self.source.glob("**/*.toml")) else: raise ValueError("Invalid source path or file type") for file_path in files: with file_path.open("r", encoding="utf-8") as file: content = file.read() try: data = tomli.loads(content) doc = Document( page_content=json.dumps(data), metadata={"source": str(file_path)}, ) yield doc except tomli.TOMLDecodeError as e: print(f"Error parsing TOML file {file_path}: {e}")
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/toml.html
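A usage sketch for TomlLoader; the path is a placeholder, and each TOML file becomes one Document whose page_content is the JSON-serialized data.

from langchain.document_loaders import TomlLoader

loader = TomlLoader("config/")  # placeholder: a .toml file or a directory of .toml files
for doc in loader.lazy_load():
    print(doc.metadata["source"], doc.page_content[:80])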
b5cfa438f159-0
Source code for langchain.document_loaders.psychic """Loader that loads documents from Psychic.dev.""" from typing import List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class PsychicLoader(BaseLoader): """Loader that loads documents from Psychic.dev.""" def __init__( self, api_key: str, account_id: str, connector_id: Optional[str] = None ): """Initialize with API key, connector id, and account id.""" try: from psychicapi import ConnectorId, Psychic # noqa: F401 except ImportError: raise ImportError( "`psychicapi` package not found, please run `pip install psychicapi`" ) self.psychic = Psychic(secret_key=api_key) self.connector_id = ConnectorId(connector_id) self.account_id = account_id [docs] def load(self) -> List[Document]: """Load documents.""" psychic_docs = self.psychic.get_documents( connector_id=self.connector_id, account_id=self.account_id ) return [ Document( page_content=doc["content"], metadata={"title": doc["title"], "source": doc["uri"]}, ) for doc in psychic_docs.documents ]
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/psychic.html
ce173df95baf-0
Source code for langchain.document_loaders.iugu """Loader that fetches data from IUGU""" import json import urllib.request from typing import List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utils import get_from_env, stringify_dict IUGU_ENDPOINTS = { "invoices": "https://api.iugu.com/v1/invoices", "customers": "https://api.iugu.com/v1/customers", "charges": "https://api.iugu.com/v1/charges", "subscriptions": "https://api.iugu.com/v1/subscriptions", "plans": "https://api.iugu.com/v1/plans", } [docs]class IuguLoader(BaseLoader): """Loader that fetches data from IUGU.""" def __init__(self, resource: str, api_token: Optional[str] = None) -> None: self.resource = resource api_token = api_token or get_from_env("api_token", "IUGU_API_TOKEN") self.headers = {"Authorization": f"Bearer {api_token}"} def _make_request(self, url: str) -> List[Document]: request = urllib.request.Request(url, headers=self.headers) with urllib.request.urlopen(request) as response: json_data = json.loads(response.read().decode()) text = stringify_dict(json_data) metadata = {"source": url} return [Document(page_content=text, metadata=metadata)] def _get_resource(self) -> List[Document]: endpoint = IUGU_ENDPOINTS.get(self.resource) if endpoint is None: return [] return self._make_request(endpoint) [docs] def load(self) -> List[Document]:
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/iugu.html
ce173df95baf-1
[docs] def load(self) -> List[Document]: return self._get_resource()
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/iugu.html
d988c2fb00fd-0
Source code for langchain.document_loaders.blob_loaders.youtube_audio from typing import Iterable, List from langchain.document_loaders.blob_loaders import FileSystemBlobLoader from langchain.document_loaders.blob_loaders.schema import Blob, BlobLoader [docs]class YoutubeAudioLoader(BlobLoader): """Load YouTube urls as audio file(s).""" def __init__(self, urls: List[str], save_dir: str): if not isinstance(urls, list): raise TypeError("urls must be a list") self.urls = urls self.save_dir = save_dir [docs] def yield_blobs(self) -> Iterable[Blob]: """Yield audio blobs for each url.""" try: import yt_dlp except ImportError: raise ValueError( "yt_dlp package not found, please install it with " "`pip install yt_dlp`" ) # Use yt_dlp to download audio given a YouTube url ydl_opts = { "format": "m4a/bestaudio/best", "noplaylist": True, "outtmpl": self.save_dir + "/%(title)s.%(ext)s", "postprocessors": [ { "key": "FFmpegExtractAudio", "preferredcodec": "m4a", } ], } for url in self.urls: # Download file with yt_dlp.YoutubeDL(ydl_opts) as ydl: ydl.download(url) # Yield the written blobs loader = FileSystemBlobLoader(self.save_dir, glob="*.m4a") for blob in loader.yield_blobs(): yield blob
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/blob_loaders/youtube_audio.html
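A usage sketch for YoutubeAudioLoader; the URL and save directory are placeholders, and ffmpeg must be available for the m4a post-processing step.

from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader

urls = ["https://www.youtube.com/watch?v=dQw4w9WgXcQ"]  # placeholder URL
loader = YoutubeAudioLoader(urls, save_dir="./audio")    # placeholder directory
for blob in loader.yield_blobs():
    print(blob.source, blob.mimetype)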
85670de318d1-0
Source code for langchain.document_loaders.blob_loaders.file_system """Use to load blobs from the local file system.""" from pathlib import Path from typing import Callable, Iterable, Iterator, Optional, Sequence, TypeVar, Union from langchain.document_loaders.blob_loaders.schema import Blob, BlobLoader T = TypeVar("T") def _make_iterator( length_func: Callable[[], int], show_progress: bool = False ) -> Callable[[Iterable[T]], Iterator[T]]: """Create a function that optionally wraps an iterable in tqdm.""" if show_progress: try: from tqdm.auto import tqdm except ImportError: raise ImportError( "You must install tqdm to use show_progress=True." "You can install tqdm with `pip install tqdm`." ) # Make sure to provide `total` here so that tqdm can show # a progress bar that takes into account the total number of files. def _with_tqdm(iterable: Iterable[T]) -> Iterator[T]: """Wrap an iterable in a tqdm progress bar.""" return tqdm(iterable, total=length_func()) iterator = _with_tqdm else: iterator = iter # type: ignore return iterator # PUBLIC API [docs]class FileSystemBlobLoader(BlobLoader): """Blob loader for the local file system. Example: .. code-block:: python from langchain.document_loaders.blob_loaders import FileSystemBlobLoader loader = FileSystemBlobLoader("/path/to/directory") for blob in loader.yield_blobs(): print(blob) """ def __init__( self, path: Union[str, Path], *, glob: str = "**/[!.]*",
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/blob_loaders/file_system.html
85670de318d1-1
*, glob: str = "**/[!.]*", suffixes: Optional[Sequence[str]] = None, show_progress: bool = False, ) -> None: """Initialize with path to directory and how to glob over it. Args: path: Path to directory to load from glob: Glob pattern relative to the specified path by default set to pick up all non-hidden files suffixes: Provide to keep only files with these suffixes Useful when wanting to keep files with different suffixes Suffixes must include the dot, e.g. ".txt" show_progress: If true, will show a progress bar as the files are loaded. This forces an iteration through all matching files to count them prior to loading them. Examples: ... code-block:: python # Recursively load all text files in a directory. loader = FileSystemBlobLoader("/path/to/directory", glob="**/*.txt") # Recursively load all non-hidden files in a directory. loader = FileSystemBlobLoader("/path/to/directory", glob="**/[!.]*") # Load all files in a directory without recursion. loader = FileSystemBlobLoader("/path/to/directory", glob="*") """ if isinstance(path, Path): _path = path elif isinstance(path, str): _path = Path(path) else: raise TypeError(f"Expected str or Path, got {type(path)}") self.path = _path self.glob = glob self.suffixes = set(suffixes or []) self.show_progress = show_progress [docs] def yield_blobs( self, ) -> Iterable[Blob]:
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/blob_loaders/file_system.html
85670de318d1-2
self, ) -> Iterable[Blob]: """Yield blobs that match the requested pattern.""" iterator = _make_iterator( length_func=self.count_matching_files, show_progress=self.show_progress ) for path in iterator(self._yield_paths()): yield Blob.from_path(path) def _yield_paths(self) -> Iterable[Path]: """Yield paths that match the requested pattern.""" paths = self.path.glob(self.glob) for path in paths: if path.is_file(): if self.suffixes and path.suffix not in self.suffixes: continue yield path [docs] def count_matching_files(self) -> int: """Count files that match the pattern without loading them.""" # Carry out a full iteration to count the files without # materializing anything expensive in memory. num = 0 for _ in self._yield_paths(): num += 1 return num
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/blob_loaders/file_system.html
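A usage sketch for FileSystemBlobLoader using the glob and suffixes options documented above; the directory is a placeholder.

from langchain.document_loaders.blob_loaders import FileSystemBlobLoader

loader = FileSystemBlobLoader(
    "/path/to/directory",  # placeholder directory
    glob="**/*.txt",
    suffixes=[".txt"],
    show_progress=True,    # requires tqdm
)
print(loader.count_matching_files())
for blob in loader.yield_blobs():
    print(blob.path)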
df139babe704-0
Source code for langchain.document_loaders.blob_loaders.schema """Schema for Blobs and Blob Loaders. The goal is to facilitate decoupling of content loading from content parsing code. In addition, content loading code should provide a lazy loading interface by default. """ from __future__ import annotations import contextlib import mimetypes from abc import ABC, abstractmethod from io import BufferedReader, BytesIO from pathlib import PurePath from typing import Any, Generator, Iterable, Mapping, Optional, Union from pydantic import BaseModel, root_validator PathLike = Union[str, PurePath] [docs]class Blob(BaseModel): """A blob is used to represent raw data by either reference or value. Provides an interface to materialize the blob in different representations, and help to decouple the development of data loaders from the downstream parsing of the raw data. Inspired by: https://developer.mozilla.org/en-US/docs/Web/API/Blob """ data: Union[bytes, str, None] # Raw data mimetype: Optional[str] = None # Not to be confused with a file extension encoding: str = "utf-8" # Use utf-8 as default encoding, if decoding to string # Location where the original content was found # Represent location on the local file system # Useful for situations where downstream code assumes it must work with file paths # rather than in-memory content. path: Optional[PathLike] = None class Config: arbitrary_types_allowed = True frozen = True @property def source(self) -> Optional[str]: """The source location of the blob as string if known otherwise none.""" return str(self.path) if self.path else None @root_validator(pre=True)
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/blob_loaders/schema.html
df139babe704-1
return str(self.path) if self.path else None @root_validator(pre=True) def check_blob_is_valid(cls, values: Mapping[str, Any]) -> Mapping[str, Any]: """Verify that either data or path is provided.""" if "data" not in values and "path" not in values: raise ValueError("Either data or path must be provided") return values [docs] def as_string(self) -> str: """Read data as a string.""" if self.data is None and self.path: with open(str(self.path), "r", encoding=self.encoding) as f: return f.read() elif isinstance(self.data, bytes): return self.data.decode(self.encoding) elif isinstance(self.data, str): return self.data else: raise ValueError(f"Unable to get string for blob {self}") [docs] def as_bytes(self) -> bytes: """Read data as bytes.""" if isinstance(self.data, bytes): return self.data elif isinstance(self.data, str): return self.data.encode(self.encoding) elif self.data is None and self.path: with open(str(self.path), "rb") as f: return f.read() else: raise ValueError(f"Unable to get bytes for blob {self}") [docs] @contextlib.contextmanager def as_bytes_io(self) -> Generator[Union[BytesIO, BufferedReader], None, None]: """Read data as a byte stream.""" if isinstance(self.data, bytes): yield BytesIO(self.data) elif self.data is None and self.path: with open(str(self.path), "rb") as f: yield f else:
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/blob_loaders/schema.html
df139babe704-2
yield f else: raise NotImplementedError(f"Unable to convert blob {self}") [docs] @classmethod def from_path( cls, path: PathLike, *, encoding: str = "utf-8", mime_type: Optional[str] = None, guess_type: bool = True, ) -> Blob: """Load the blob from a path like object. Args: path: path like object to file to be read encoding: Encoding to use if decoding the bytes into a string mime_type: if provided, will be set as the mime-type of the data guess_type: If True, the mimetype will be guessed from the file extension, if a mime-type was not provided Returns: Blob instance """ if mime_type is None and guess_type: _mimetype = mimetypes.guess_type(path)[0] if guess_type else None else: _mimetype = mime_type # We do not load the data immediately, instead we treat the blob as a # reference to the underlying data. return cls(data=None, mimetype=_mimetype, encoding=encoding, path=path) [docs] @classmethod def from_data( cls, data: Union[str, bytes], *, encoding: str = "utf-8", mime_type: Optional[str] = None, path: Optional[str] = None, ) -> Blob: """Initialize the blob from in-memory data. Args: data: the in-memory data associated with the blob encoding: Encoding to use if decoding the bytes into a string mime_type: if provided, will be set as the mime-type of the data
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/blob_loaders/schema.html
df139babe704-3
mime_type: if provided, will be set as the mime-type of the data path: if provided, will be set as the source from which the data came Returns: Blob instance """ return cls(data=data, mimetype=mime_type, encoding=encoding, path=path) def __repr__(self) -> str: """Define the blob representation.""" str_repr = f"Blob {id(self)}" if self.source: str_repr += f" {self.source}" return str_repr [docs]class BlobLoader(ABC): """Abstract interface for blob loaders implementation. Implementer should be able to load raw content from a storage system according to some criteria and return the raw content lazily as a stream of blobs. """ [docs] @abstractmethod def yield_blobs( self, ) -> Iterable[Blob]: """A lazy loader for raw data represented by LangChain's Blob object. Returns: A generator over blobs """
https://api.python.langchain.com/en/stable/_modules/langchain/document_loaders/blob_loaders/schema.html
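A short sketch of the Blob interface defined above, covering both the in-memory and the path-backed representation; the file path is a placeholder.

from langchain.document_loaders.blob_loaders.schema import Blob

# In-memory blob: nothing touches the file system.
blob = Blob.from_data("hello world", mime_type="text/plain")
print(blob.as_string(), blob.as_bytes())

# Path-backed blob: data stays on disk until an as_* accessor is called.
file_blob = Blob.from_path("notes.txt")  # placeholder path; mimetype guessed from suffix
with file_blob.as_bytes_io() as f:
    print(f.read()[:20])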
bb02300c7fac-0
Source code for langchain.embeddings.llamacpp """Wrapper around llama.cpp embedding models.""" from typing import Any, Dict, List, Optional from pydantic import BaseModel, Extra, Field, root_validator from langchain.embeddings.base import Embeddings [docs]class LlamaCppEmbeddings(BaseModel, Embeddings): """Wrapper around llama.cpp embedding models. To use, you should have the llama-cpp-python library installed, and provide the path to the Llama model as a named parameter to the constructor. Check out: https://github.com/abetlen/llama-cpp-python Example: .. code-block:: python from langchain.embeddings import LlamaCppEmbeddings llama = LlamaCppEmbeddings(model_path="/path/to/model.bin") """ client: Any #: :meta private: model_path: str n_ctx: int = Field(512, alias="n_ctx") """Token context window.""" n_parts: int = Field(-1, alias="n_parts") """Number of parts to split the model into. If -1, the number of parts is automatically determined.""" seed: int = Field(-1, alias="seed") """Seed. If -1, a random seed is used.""" f16_kv: bool = Field(False, alias="f16_kv") """Use half-precision for key/value cache.""" logits_all: bool = Field(False, alias="logits_all") """Return logits for all tokens, not just the last token.""" vocab_only: bool = Field(False, alias="vocab_only") """Only load the vocabulary, no weights.""" use_mlock: bool = Field(False, alias="use_mlock")
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/llamacpp.html
bb02300c7fac-1
use_mlock: bool = Field(False, alias="use_mlock") """Force system to keep model in RAM.""" n_threads: Optional[int] = Field(None, alias="n_threads") """Number of threads to use. If None, the number of threads is automatically determined.""" n_batch: Optional[int] = Field(8, alias="n_batch") """Number of tokens to process in parallel. Should be a number between 1 and n_ctx.""" n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers") """Number of layers to be loaded into gpu memory. Default None.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that llama-cpp-python library is installed.""" model_path = values["model_path"] model_param_names = [ "n_ctx", "n_parts", "seed", "f16_kv", "logits_all", "vocab_only", "use_mlock", "n_threads", "n_batch", ] model_params = {k: values[k] for k in model_param_names} # For backwards compatibility, only include if non-null. if values["n_gpu_layers"] is not None: model_params["n_gpu_layers"] = values["n_gpu_layers"] try: from llama_cpp import Llama values["client"] = Llama(model_path, embedding=True, **model_params) except ImportError: raise ModuleNotFoundError( "Could not import llama-cpp-python library. "
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/llamacpp.html
bb02300c7fac-2
            raise ModuleNotFoundError(
                "Could not import llama-cpp-python library. "
                "Please install the llama-cpp-python library to "
                "use this embedding model: pip install llama-cpp-python"
            )
        except Exception as e:
            raise ValueError(
                f"Could not load Llama model from path: {model_path}. "
                f"Received error {e}"
            )

        return values

[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of documents using the Llama model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        embeddings = [self.client.embed(text) for text in texts]
        return [list(map(float, e)) for e in embeddings]

[docs] def embed_query(self, text: str) -> List[float]:
        """Embed a query using the Llama model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        embedding = self.client.embed(text)
        return list(map(float, embedding))
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/llamacpp.html
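A brief usage sketch for LlamaCppEmbeddings follows; it is not taken from the module above, and the model path, n_ctx, and n_threads values are placeholder assumptions you would replace with your own.

.. code-block:: python

    from langchain.embeddings import LlamaCppEmbeddings

    # Placeholder path to a locally downloaded llama.cpp-compatible model file.
    embedder = LlamaCppEmbeddings(
        model_path="/path/to/ggml-model.bin",
        n_ctx=512,
        n_threads=4,
    )

    # embed_documents returns one embedding (a list of floats) per input text.
    doc_vectors = embedder.embed_documents(["first document", "second document"])

    # embed_query returns a single embedding for one text.
    query_vector = embedder.embed_query("a short query")

    print(len(doc_vectors), len(query_vector))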
725ad95173bc-0
Source code for langchain.embeddings.aleph_alpha

from typing import Any, Dict, List, Optional

from pydantic import BaseModel, root_validator

from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env


[docs]class AlephAlphaAsymmetricSemanticEmbedding(BaseModel, Embeddings):
    """Wrapper for Aleph Alpha's asymmetric semantic embeddings.

    Aleph Alpha provides an endpoint for embedding a document and a query.
    The models were optimized to make the embedding of a document and the
    embedding of a query for that document as similar as possible.
    To learn more, check out: https://docs.aleph-alpha.com/docs/tasks/semantic_embed/

    Example:
        .. code-block:: python

            from langchain.embeddings import AlephAlphaAsymmetricSemanticEmbedding

            embeddings = AlephAlphaAsymmetricSemanticEmbedding()

            document = "This is the content of the document"
            query = "What is the content of the document?"

            doc_result = embeddings.embed_documents([document])
            query_result = embeddings.embed_query(query)
    """

    client: Any  #: :meta private:
    model: Optional[str] = "luminous-base"
    """Model name to use."""
    hosting: Optional[str] = "https://api.aleph-alpha.com"
    """Optional parameter that specifies which datacenters may process the request."""
    normalize: Optional[bool] = True
    """Whether the returned embeddings should be normalized."""
    compress_to_size: Optional[int] = 128
    """Whether to return the original 5120-dimensional embedding or compress it
    to 128 dimensions."""
    contextual_control_threshold: Optional[int] = None
    """Attention control parameters only apply to those tokens that have
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/aleph_alpha.html
725ad95173bc-1
"""Attention control parameters only apply to those tokens that have explicitly been set in the request.""" control_log_additive: Optional[bool] = True """Apply controls on prompt items by adding the log(control_factor) to attention scores.""" aleph_alpha_api_key: Optional[str] = None """API key for Aleph Alpha API.""" @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" aleph_alpha_api_key = get_from_dict_or_env( values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY" ) try: from aleph_alpha_client import Client except ImportError: raise ValueError( "Could not import aleph_alpha_client python package. " "Please install it with `pip install aleph_alpha_client`." ) values["client"] = Client(token=aleph_alpha_api_key) return values [docs] def embed_documents(self, texts: List[str]) -> List[List[float]]: """Call out to Aleph Alpha's asymmetric Document endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ try: from aleph_alpha_client import ( Prompt, SemanticEmbeddingRequest, SemanticRepresentation, ) except ImportError: raise ValueError( "Could not import aleph_alpha_client python package. " "Please install it with `pip install aleph_alpha_client`." ) document_embeddings = [] for text in texts: document_params = { "prompt": Prompt.from_text(text),
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/aleph_alpha.html
725ad95173bc-2
            document_params = {
                "prompt": Prompt.from_text(text),
                "representation": SemanticRepresentation.Document,
                "compress_to_size": self.compress_to_size,
                "normalize": self.normalize,
                "contextual_control_threshold": self.contextual_control_threshold,
                "control_log_additive": self.control_log_additive,
            }
            document_request = SemanticEmbeddingRequest(**document_params)
            document_response = self.client.semantic_embed(
                request=document_request, model=self.model
            )
            document_embeddings.append(document_response.embedding)
        return document_embeddings

[docs] def embed_query(self, text: str) -> List[float]:
        """Call out to Aleph Alpha's asymmetric query embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        try:
            from aleph_alpha_client import (
                Prompt,
                SemanticEmbeddingRequest,
                SemanticRepresentation,
            )
        except ImportError:
            raise ValueError(
                "Could not import aleph_alpha_client python package. "
                "Please install it with `pip install aleph_alpha_client`."
            )
        symmetric_params = {
            "prompt": Prompt.from_text(text),
            "representation": SemanticRepresentation.Query,
            "compress_to_size": self.compress_to_size,
            "normalize": self.normalize,
            "contextual_control_threshold": self.contextual_control_threshold,
            "control_log_additive": self.control_log_additive,
        }
        symmetric_request = SemanticEmbeddingRequest(**symmetric_params)
        symmetric_response = self.client.semantic_embed(
            request=symmetric_request, model=self.model
        )
        return symmetric_response.embedding
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/aleph_alpha.html
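Because embed_documents uses SemanticRepresentation.Document and embed_query uses SemanticRepresentation.Query, a common pattern is to rank candidate documents against a query. The sketch below is illustrative only: the sample texts and the cosine-similarity helper are not part of the module, and it assumes ALEPH_ALPHA_API_KEY is set in the environment so the root validator can pick it up.

.. code-block:: python

    import math

    from langchain.embeddings import AlephAlphaAsymmetricSemanticEmbedding

    # Assumes ALEPH_ALPHA_API_KEY is exported in the environment.
    embeddings = AlephAlphaAsymmetricSemanticEmbedding()

    docs = [
        "The moon orbits the Earth roughly every 27 days.",
        "Sourdough bread needs a long, slow fermentation.",
    ]
    doc_vectors = embeddings.embed_documents(docs)
    query_vector = embeddings.embed_query(
        "How long does the moon take to orbit the Earth?"
    )


    def cosine(a, b):
        # Plain cosine similarity; adequate for a handful of vectors.
        dot = sum(x * y for x, y in zip(a, b))
        norm_a = math.sqrt(sum(x * x for x in a))
        norm_b = math.sqrt(sum(y * y for y in b))
        return dot / (norm_a * norm_b)


    best = max(range(len(docs)), key=lambda i: cosine(doc_vectors[i], query_vector))
    print(docs[best])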
725ad95173bc-3
            request=symmetric_request, model=self.model
        )
        return symmetric_response.embedding


[docs]class AlephAlphaSymmetricSemanticEmbedding(AlephAlphaAsymmetricSemanticEmbedding):
    """The symmetric version of Aleph Alpha's semantic embeddings.

    The main difference is that here, both the documents and queries are
    embedded with SemanticRepresentation.Symmetric.

    Example:
        .. code-block:: python

            from langchain.embeddings import AlephAlphaSymmetricSemanticEmbedding

            embeddings = AlephAlphaSymmetricSemanticEmbedding()

            text = "This is a test text"

            doc_result = embeddings.embed_documents([text])
            query_result = embeddings.embed_query(text)
    """

    def _embed(self, text: str) -> List[float]:
        try:
            from aleph_alpha_client import (
                Prompt,
                SemanticEmbeddingRequest,
                SemanticRepresentation,
            )
        except ImportError:
            raise ValueError(
                "Could not import aleph_alpha_client python package. "
                "Please install it with `pip install aleph_alpha_client`."
            )
        query_params = {
            "prompt": Prompt.from_text(text),
            "representation": SemanticRepresentation.Symmetric,
            "compress_to_size": self.compress_to_size,
            "normalize": self.normalize,
            "contextual_control_threshold": self.contextual_control_threshold,
            "control_log_additive": self.control_log_additive,
        }
        query_request = SemanticEmbeddingRequest(**query_params)
        query_response = self.client.semantic_embed(
            request=query_request, model=self.model
        )
        return query_response.embedding

[docs] def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to Aleph Alpha's Document endpoint.
https://api.python.langchain.com/en/stable/_modules/langchain/embeddings/aleph_alpha.html
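For completeness, a minimal usage sketch of the symmetric variant, assuming a valid API key; the texts are invented for illustration. Since both documents and queries go through SemanticRepresentation.Symmetric, the same string embedded either way should yield the same vector, which suits clustering or deduplication more than question-answer retrieval.

.. code-block:: python

    from langchain.embeddings import AlephAlphaSymmetricSemanticEmbedding

    # aleph_alpha_api_key is a field on the model, so it can be passed directly
    # instead of relying on the ALEPH_ALPHA_API_KEY environment variable.
    embeddings = AlephAlphaSymmetricSemanticEmbedding(
        aleph_alpha_api_key="<your-api-key>"
    )

    text = "Deduplicate near-identical support tickets"
    doc_vector = embeddings.embed_documents([text])[0]
    query_vector = embeddings.embed_query(text)

    # Both calls use the Symmetric representation, so the two vectors should match.
    print(len(doc_vector), len(query_vector))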